Merge pull request #4919 from sivel/4577-rax-refactor
Refactor rax module. Fixes #4577
commit 7ebda819b5
1 changed file with 513 additions and 191 deletions

@@ -19,82 +19,111 @@ DOCUMENTATION = '''
module: rax
short_description: create / delete an instance in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud instance and optionally waits for it to be 'running'.
- creates / deletes a Rackspace Public Cloud instance and optionally
waits for it to be 'running'.
version_added: "1.2"
options:
service:
description:
- Cloud service to interact with
choices: ['cloudservers']
default: cloudservers
state:
description:
- Indicate desired state of the resource
choices: ['present', 'active', 'absent', 'deleted']
default: present
credentials:
description:
- File to find the Rackspace credentials in (ignored if C(api_key) and
C(username) are provided)
default: null
aliases: ['creds_file']
api_key:
description:
- Rackspace API key (overrides C(credentials))
username:
- Rackspace API key (overrides I(credentials))
count:
description:
- Rackspace username (overrides C(credentials))
name:
- number of instances to launch
default: 1
version_added: 1.4
count_offset:
description:
- Name to give the instance
- number count to start at
default: 1
version_added: 1.4
credentials:
description:
- File to find the Rackspace credentials in (ignored if I(api_key) and
I(username) are provided)
default: null
flavor:
description:
- flavor to use for the instance
default: null
image:
description:
- image to use for the instance
default: null
meta:
description:
- A hash of metadata to associate with the instance
default: null
key_name:
description:
- key pair to use on the instance
default: null
aliases: ['keypair']
files:
description:
- Files to insert into the instance. remotefilename:localcontent
default: null
region:
description:
- Region to create an instance in
default: DFW
aliases: ['creds_file']
disk_config:
description:
- Disk partitioning strategy
choices: ['auto', 'manual']
version_added: '1.4'
default: auto
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
state=active/present
default: no
version_added: 1.4
files:
description:
- Files to insert into the instance. remotefilename:localcontent
default: null
flavor:
description:
- flavor to use for the instance
default: null
group:
description:
- host group to assign to server, is also used for idempotent operations
to ensure a specific number of instances
version_added: 1.4
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name)
default: null
instance_ids:
description:
- list of instance ids, currently only used when state='absent' to
remove instances
version_added: 1.4
key_name:
description:
- key pair to use on the instance
default: null
aliases: ['keypair']
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the instance
default: null
networks:
description:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
default: ['public', 'private']
version_added: 1.4
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
username:
description:
- Rackspace username (overrides I(credentials))
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ "pyrax" ]
author: Jesse Keating
author: Jesse Keating, Matt Martz
notes:
- The following environment variables can be used, C(RAX_USERNAME),
C(RAX_API_KEY), C(RAX_CREDS), C(RAX_CREDENTIALS), C(RAX_REGION).
- C(RAX_CREDENTIALS) and C(RAX_CREDS) points to a credentials file
appropriate for pyrax
C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
'''
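For readers who have not used pyrax, the authentication paths described in the notes above can be sketched roughly as follows. This is a standalone illustration, not part of the diff; pyrax.set_credential_file comes from pyrax's documented API rather than from the hunks shown here, and the default region value is an assumption.

    import os
    import pyrax

    # Rackspace Public Cloud uses the 'rackspace' identity type.
    pyrax.set_setting('identity_type', 'rackspace')

    username = os.environ.get('RAX_USERNAME')
    api_key = os.environ.get('RAX_API_KEY')
    creds_file = os.environ.get('RAX_CREDS_FILE')   # pyrax-style credentials file
    region = os.environ.get('RAX_REGION', 'DFW')    # e.g. DFW, ORD, LON

    if username and api_key:
        # Explicit credentials take precedence over a credentials file.
        pyrax.set_credentials(username, api_key=api_key, region=region)
    elif creds_file:
        pyrax.set_credential_file(os.path.expanduser(creds_file), region=region)
    else:
        raise SystemExit('No Rackspace credentials supplied')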

@@ -102,13 +131,11 @@ notes:
EXAMPLES = '''
- name: Build a Cloud Server
gather_facts: False

tasks:
- name: Server build request
local_action:
module: rax
credentials: ~/.raxpub
service: cloudservers
name: rax-test1
flavor: 5
image: b11d9567-e412-4255-96b9-bd63ab23bcfe

@@ -117,179 +144,479 @@ EXAMPLES = '''
/root/test.txt: /home/localuser/test.txt
wait: yes
state: present
networks:
- private
- public
'''

import sys
import time
import os
import re
from uuid import UUID
from types import NoneType

try:
import pyrax
import pyrax.utils
except ImportError:
print("failed=True msg='pyrax required for this module'")
print("failed=True msg='pyrax is required for this module'")
sys.exit(1)

# These are possible services, but only cloudservers is supported at this time
#SUPPORTEDSERVICES = ['cloudservers', 'cloudfiles', 'cloud_blockstorage',
# 'cloud_databases', 'cloud_loadbalancers']
SUPPORTEDSERVICES = ['cloudservers']
ACTIVE_STATUSES = ('ACTIVE', 'BUILD', 'HARD_REBOOT', 'MIGRATING', 'PASSWORD',
'REBOOT', 'REBUILD', 'RESCUE', 'RESIZE', 'REVERT_RESIZE')
FINAL_STATUSES = ('ACTIVE', 'ERROR')
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"

def rax_slugify(value):
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))

def pyrax_object_to_dict(obj):
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
key = rax_slugify(key)
instance[key] = value

for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))

return instance
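As a quick illustration of what the helper above does (a standalone copy of the expression, not an addition to the module): slugified keys are lower-cased, prefixed with rax_, and have characters other than word characters and hyphens replaced with underscores.

    import re

    def rax_slugify(value):
        # Same expression as in the module, written with a raw string:
        # replace anything that is not a word character or hyphen,
        # lower-case, strip leading underscores, then prefix with 'rax_'.
        return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))

    print(rax_slugify('accessIPv4'))         # rax_accessipv4
    print(rax_slugify('OS-DCF:diskConfig'))  # rax_os-dcf_diskconfig
    print(rax_slugify('_info'))              # rax_info

This renaming is why pyrax_object_to_dict copies id, accessIPv4, name and status back under their original keys at the end.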

def create(module, names, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config, group, nics):

cs = pyrax.cloudservers
changed = False

# Handle the file contents
for rpath in files.keys():
lpath = os.path.expanduser(files[rpath])
try:
fileobj = open(lpath, 'r')
files[rpath] = fileobj
except Exception, e:
module.fail_json(msg='Failed to load %s' % lpath)
try:
servers = []
for name in names:
servers.append(cs.servers.create(name=name, image=image,
flavor=flavor, meta=meta,
key_name=key_name,
files=files, nics=nics,
disk_config=disk_config))
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True

if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
try:
server.get()
except:
server.status == 'ERROR'

if not filter(lambda s: s.status not in FINAL_STATUSES,
servers):
break
time.sleep(5)

success = []
error = []
timeout = []
for server in servers:
try:
server.get()
except:
server.status == 'ERROR'
instance = pyrax_object_to_dict(server)
if server.status == 'ACTIVE' or not wait:
success.append(instance)
elif server.status == 'ERROR':
error.append(instance)
elif wait:
timeout.append(instance)

results = {
'changed': changed,
'action': 'create',
'instances': success + error + timeout,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in success + error + timeout],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}

if timeout:
results['msg'] = 'Timeout waiting for all servers to build'
elif error:
results['msg'] = 'Failed to build all servers'

if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
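The wait block above, like the matching loop in delete() below, is a poll-until-terminal-state-or-deadline pattern. A simplified standalone sketch, with a fake refresh callable standing in for server.get():

    import time

    FINAL_STATUSES = ('ACTIVE', 'ERROR')

    def wait_for_all(servers, refresh, wait_timeout=300, poll=5):
        # refresh(server) is expected to update server.status in place, the way
        # server.get() does for a pyrax server; wait_timeout=0 waits forever.
        end_time = time.time() + wait_timeout
        infinite = wait_timeout == 0
        while infinite or time.time() < end_time:
            for server in servers:
                refresh(server)
            if all(s.status in FINAL_STATUSES for s in servers):
                return True   # every server reached a terminal state
            time.sleep(poll)
        return False          # deadline passed with builds still pending

    class FakeServer(object):
        def __init__(self, statuses):
            self.pending = list(statuses)
            self.status = self.pending.pop(0)

    def fake_refresh(server):
        if server.pending:
            server.status = server.pending.pop(0)

    servers = [FakeServer(['BUILD', 'ACTIVE']), FakeServer(['BUILD', 'BUILD', 'ERROR'])]
    print(wait_for_all(servers, fake_refresh, wait_timeout=60, poll=0))  # True

The module writes the final check with filter(), which returns a plain list under Python 2, so an empty result is falsy.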

def delete(module, instance_ids, wait, wait_timeout):
cs = pyrax.cloudservers

changed = False
instances = {}
servers = []

for instance_id in instance_ids:
servers.append(cs.servers.get(instance_id))

for server in servers:
try:
server.delete()
except Exception, e:
module.fail_json(msg=e.message)
else:
changed = True

instance = pyrax_object_to_dict(server)
instances[instance['id']] = instance

# If requested, wait for server deletion
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
instance_id = server.id
try:
server.get()
except:
instances[instance_id]['status'] = 'DELETED'

if not filter(lambda s: s['status'] not in ('', 'DELETED',
'ERROR'),
instances.values()):
break

time.sleep(5)

timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
instances.values())
error = filter(lambda s: s['status'] in ('ERROR'),
instances.values())
success = filter(lambda s: s['status'] in ('', 'DELETED'),
instances.values())

results = {
'changed': changed,
'action': 'delete',
'instances': success + error + timeout,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in success + error + timeout],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}

if timeout:
results['msg'] = 'Timeout waiting for all servers to delete'
elif error:
results['msg'] = 'Failed to delete all servers'

if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)

def cloudservers(module, state, name, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config):
# Check our args (this could be done better)
for arg in (state, name, flavor, image):
if not arg:
module.fail_json(msg='%s is required for cloudservers' % arg)

instances = []
changed = False
wait, wait_timeout, disk_config, count, group,
instance_ids, exact_count, networks, count_offset):
cs = pyrax.cloudservers
cnw = pyrax.cloud_networks
servers = []
# See if we can find servers that match our options
for server in pyrax.cloudservers.list():
if name != server.name:
continue
if int(flavor) != int(server.flavor['id']):
continue
if image != server.image['id']:
continue
if meta != server.metadata:
continue
# Nothing else ruled us not a match, so consider it a winner
servers.append(server)

# Add the group meta key
if group and 'group' not in meta:
meta['group'] = group
elif 'group' in meta and group is None:
group = meta['group']

# Check if the provided image is a UUID and if not, search for an
# appropriate image using human_id and name
if image:
try:
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
except (pyrax.exceptions.NotFound,
pyrax.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (pyrax.exceptions.NotFound,
pyrax.exceptions.NoUniqueMatch):
module.fail_json(msg='No matching image found (%s)' %
image)

image = pyrax.utils.get_id(image)
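The UUID() call above is used purely as a validity test: a value that parses as a UUID is taken to be the image id, anything else falls through to the human_id and name lookups. A standalone sketch of that test (the second value is an invented example):

    from uuid import UUID

    def looks_like_uuid(value):
        try:
            UUID(value)
            return True
        except ValueError:
            return False

    print(looks_like_uuid('b11d9567-e412-4255-96b9-bd63ab23bcfe'))  # True  -> use as image id
    print(looks_like_uuid('Ubuntu 12.04 LTS'))                      # False -> look up by human_id, then name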

# Check if the provided network is a UUID and if not, search for an
# appropriate network using label
nics = []
if networks:
for network in networks:
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
nics.extend(cnw.get_server_networks(PUBLIC_NET_ID))
elif network.lower() == 'private':
nics.extend(cnw.get_server_networks(SERVICE_NET_ID))
else:
try:
network_obj = cnw.find_network_by_label(network)
except (pyrax.exceptions.NetworkNotFound,
pyrax.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
nics.extend(cnw.get_server_networks(network_obj))
else:
nics.extend(cnw.get_server_networks(network))
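The effect of this branch is to turn each entry of networks into the nics list handed to cs.servers.create(), with 'public' and 'private' mapped to the two well-known network UUIDs. A rough standalone sketch; the {'net-id': ...} shape is assumed to match what cnw.get_server_networks() returns, label_to_id stands in for cnw.find_network_by_label(), and the UUID pass-through case is omitted for brevity:

    PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
    SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"

    def resolve_networks(networks, label_to_id):
        # label_to_id maps a network label to its UUID and raises KeyError
        # when there is no match.
        nics = []
        for network in networks:
            if network.lower() == 'public':
                net_id = PUBLIC_NET_ID
            elif network.lower() == 'private':
                net_id = SERVICE_NET_ID
            else:
                net_id = label_to_id[network]
            nics.append({'net-id': net_id})
        return nics

    print(resolve_networks(['public', 'private'], {}))
    # [{'net-id': '00000000-...'}, {'net-id': '11111111-...'}]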

# act on the state
if state in ('active', 'present'):
if not servers:
# Handle the file contents
for rpath in files.keys():
lpath = os.path.expanduser(files[rpath])
if state == 'present':
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" module' %
arg)

# Idempotent ensurance of a specific count of servers
if exact_count is not False:
# See if we can find servers that match our options
if group is None:
module.fail_json(msg='"group" must be provided when using '
'"exact_count"')
else:
numbers = set()

try:
fileobj = open(lpath, 'r')
files[rpath] = fileobj
except Exception, e:
module.fail_json(msg = 'Failed to load %s' % lpath)
try:
servers = [pyrax.cloudservers.servers.create(name=name,
image=image,
flavor=flavor,
key_name=key_name,
meta=meta,
files=files,
disk_config=disk_config)]
changed = True
except Exception, e:
module.fail_json(msg = '%s' % e.message)
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)

for server in servers:
# If requested, wait for server activation
if wait:
pyrax.utils.wait_until(server, 'status', ('ACTIVE', 'ERROR'),
interval=5, attempts=wait_timeout/5)
pattern = re.sub(r'%\d+[sd]', r'(\d+)', name)
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
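The name handling above leans on printf-style placeholders: name % 0 is attempted, and a TypeError whose message starts with 'not all' means the name carries no placeholder, so '%d' is appended; a width-qualified placeholder is then rewritten into a capturing regex to recover the numbers already in use. A standalone sketch of both steps ('web-%03d' is an invented example, and this sketch checks str(e) where the module checks e.message):

    import re

    def ensure_placeholder(name):
        # Mirrors the try/except around `name % 0` in the module.
        try:
            name % 0
        except TypeError as e:
            if str(e).startswith('not all'):
                name = '%s%%d' % name
            else:
                raise
        return name

    print(ensure_placeholder('web'))        # 'web%d'
    print(ensure_placeholder('web-%03d'))   # 'web-%03d', unchanged

    # A width-qualified placeholder becomes a capturing pattern:
    print(re.sub(r'%\d+[sd]', r'(\d+)', 'web-%03d'))  # 'web-(\d+)'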

# Get a fresh copy of the server details
server.get()
if server.status == 'ACTIVE':
instance = {'id': server.id,
'accessIPv4': server.accessIPv4,
'name': server.name,
'status': server.status}
instances.append(instance)
elif server.status == 'ERROR':
module.fail_json(msg = '%s failed to build' % server.id)
elif wait:
# waiting took too long
module.fail_json(msg = 'Timeout waiting on %s' % server.id)
number_range = xrange(count_offset, count_offset + count)
available_numbers = list(set(number_range).difference(numbers))
if len(servers) > count:
state = 'absent'
del servers[:count]
instance_ids = []
for server in servers:
instance_ids.append(server.id)
delete(module, instance_ids, wait, wait_timeout)
elif len(servers) < count:
names = []
numbers_to_use = available_numbers[:count - len(servers)]
for number in numbers_to_use:
names.append(name % number)
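With exact_count, the module reconciles what already exists against what was requested: numbers taken by matching servers are removed from the requested range, surplus servers are deleted, and missing ones are created from the free numbers. A worked standalone example with assumed values (count=5, count_offset=1, existing servers web-1 and web-3); sorted() is only for a stable printout, the module keeps an unsorted list and uses xrange:

    count = 5
    count_offset = 1
    name = 'web-%d'
    numbers = set([1, 3])   # numbers recovered from existing server names

    number_range = range(count_offset, count_offset + count)         # 1..5
    available_numbers = sorted(set(number_range).difference(numbers))
    print(available_numbers)                                          # [2, 4, 5]

    existing = 2             # len(servers) already matching the group
    names = [name % n for n in available_numbers[:count - existing]]
    print(names)             # ['web-2', 'web-4', 'web-5']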

else:
module.exit_json(changed=False, action=None, instances=[],
success=[], error=[], timeout=[],
instance_ids={'instances': [],
'success': [], 'error': [],
'timeout': []})

else:
if group is not None:
numbers = set()

elif state in ('absent', 'deleted'):
# See if we can find a server that matches our credentials
for server in servers:
if server.name == name:
if int(server.flavor['id']) == int(flavor) and \
server.image['id'] == image and \
server.metadata == meta:
try:
server.delete()
except Exception, e:
module.fail_json(msg = e.message)
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)

instance = {'id': server.id,
'accessIPv4': server.accessIPv4,
'name': server.name,
'status': 'DELETING'}
pattern = re.sub(r'%\d+[sd]', r'(\d+)', name)
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)

# If requested, wait for server deletion
if wait:
try:
pyrax.utils.wait_until(server, 'status', '', interval=5,
attempts=wait_timeout/5)
# Get a fresh copy of the server details
server.get()
except Exception, e:
# In this case, an exception means the server is NotFound
instance['status'] = 'DELETED'
else:
# waiting took too long
module.fail_json(msg = 'Timeout waiting on delete %s (%s)' % (server.id, server.status))
number_range = xrange(count_offset,
count_offset + count + len(numbers))
available_numbers = list(set(number_range).difference(numbers))
names = []
numbers_to_use = available_numbers[:count]
for number in numbers_to_use:
names.append(name % number)
else:
search_opts = {
'name': name,
'image': image,
'flavor': flavor
}
servers = []
for server in cs.servers.list(search_opts=search_opts):
if server.metadata != meta:
continue
servers.append(server)

instances.append(instance)
changed = True
if len(servers) >= count:
instances = []
for server in servers:
instances.append(pyrax_object_to_dict(server))

instance_ids = [i['id'] for i in instances]
module.exit_json(changed=False, action=None,
instances=instances, success=[], error=[],
timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})

names = [name] * (count - len(servers))

create(module, names, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config, group, nics)

elif state == 'absent':
if instance_ids is None:
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" '
'module' % arg)
search_opts = {
'name': name,
'image': image,
'flavor': flavor
}
for server in cs.servers.list(search_opts=search_opts):
if meta != server.metadata:
continue
servers.append(server)

instance_ids = []
for server in servers:
if len(instance_ids) < count:
instance_ids.append(server.id)
else:
break

if not instance_ids:
module.exit_json(changed=False, action=None, instances=[],
success=[], error=[], timeout=[],
instance_ids={'instances': [],
'success': [], 'error': [],
'timeout': []})

delete(module, instance_ids, wait, wait_timeout)

module.exit_json(changed=changed, instances=instances)

def main():
module = AnsibleModule(
argument_spec = dict(
service = dict(default='cloudservers', choices=SUPPORTEDSERVICES),
state = dict(default='present', choices=['active', 'present',
'deleted', 'absent']),
credentials = dict(aliases = ['creds_file']),
argument_spec=dict(
api_key=dict(),
count=dict(default=1, type='int'),
count_offset=dict(default=1, type='int'),
credentials=dict(aliases=['creds_file']),
disk_config=dict(default='auto', choices=['auto', 'manual']),
exact_count=dict(choices=BOOLEANS, default=False, type='bool'),
files=dict(type='dict', default={}),
flavor=dict(),
group=dict(),
image=dict(),
instance_ids=dict(type='list'),
key_name=dict(aliases=['keypair']),
meta=dict(type='dict', default={}),
name=dict(),
networks=dict(type='list', default=['public', 'private']),
region=dict(),
service=dict(),
state=dict(default='present', choices=['present', 'absent']),
username=dict(),
name = dict(),
flavor = dict(),
image = dict(),
disk_config = dict(default='auto', choices=['auto', 'manual']),
meta = dict(type='dict', default={}),
key_name = dict(aliases = ['keypair']),
files = dict(type='dict', default={}),
region = dict(),
wait = dict(type='bool'),
wait_timeout = dict(default=300),
)
wait=dict(choices=BOOLEANS, default=False, type='bool'),
wait_timeout=dict(default=300),
),
)

service = module.params.get('service')
state = module.params.get('state')
credentials = module.params.get('credentials')

if service is not None:
module.fail_json(msg='The "service" attribute has been deprecated, '
'please remove "service: cloudservers" from your '
'playbook pertaining to the "rax" module')

api_key = module.params.get('api_key')
username = module.params.get('username')
name = module.params.get('name')
flavor = module.params.get('flavor')
image = module.params.get('image')
meta = module.params.get('meta')
key_name = module.params.get('key_name')
count = module.params.get('count')
count_offset = module.params.get('count_offset')
credentials = module.params.get('credentials')
disk_config = module.params.get('disk_config').upper()
exact_count = module.params.get('exact_count', False)
files = module.params.get('files')
flavor = module.params.get('flavor')
group = module.params.get('group')
image = module.params.get('image')
instance_ids = module.params.get('instance_ids')
key_name = module.params.get('key_name')
meta = module.params.get('meta')
name = module.params.get('name')
networks = module.params.get('networks')
region = module.params.get('region')
state = module.params.get('state')
username = module.params.get('username')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
disk_config = module.params.get('disk_config').upper()

# Setup the credentials and region
try:
username = username or os.environ.get('RAX_USERNAME')
api_key = api_key or os.environ.get('RAX_API_KEY')
credentials = credentials or os.environ.get('RAX_CREDENTIALS') or \
os.environ.get('RAX_CREDS_FILE')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = region or os.environ.get('RAX_REGION')

except KeyError, e:
module.fail_json(msg = 'Unable to load %s' % e.message)
module.fail_json(msg='Unable to load %s' % e.message)

# setup the auth
try:
pyrax.set_setting("identity_type", "rackspace")
pyrax.set_setting('identity_type', 'rackspace')
if api_key and username:
pyrax.set_credentials(username, api_key=api_key, region=region)
elif credentials:

@@ -298,19 +625,14 @@ def main():
else:
raise Exception('No credentials supplied!')
except Exception, e:
module.fail_json(msg = '%s' % e.message)
module.fail_json(msg='%s' % e.message)

# Act based on service
if service == 'cloudservers':
cloudservers(module, state, name, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config)
elif service in ['cloudfiles', 'cloud_blockstorage',
'cloud_databases', 'cloud_loadbalancers']:
module.fail_json(msg = 'Service %s is not supported at this time' %
service)
cloudservers(module, state, name, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config, count, group,
instance_ids, exact_count, networks, count_offset)

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
from ansible.module_utils.basic import *

### invoke the module
main()