This reverts commit 4373b155a5.

Parent: 3f83e7e473
Commit: 882f249f1d
18 changed files with 1881 additions and 538 deletions
@@ -19,17 +19,31 @@
 from __future__ import absolute_import, division, print_function

 import os
+import re
 import copy
+import json

+from datetime import datetime

 from ansible.module_utils.six import iteritems
 from ansible.module_utils.basic import AnsibleModule

+from ansible.module_utils.k8s.helper import\
+    AnsibleMixin,\
+    HAS_STRING_UTILS

 try:
-    import kubernetes
-    from openshift.dynamic import DynamicClient
+    from openshift.helper.kubernetes import KubernetesObjectHelper
+    from openshift.helper.openshift import OpenShiftObjectHelper
+    from openshift.helper.exceptions import KubernetesException
     HAS_K8S_MODULE_HELPER = True
-except ImportError:
+except ImportError as exc:
+    class KubernetesObjectHelper(object):
+        pass
+
+    class OpenShiftObjectHelper(object):
+        pass
+
     HAS_K8S_MODULE_HELPER = False

 try:
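The hunk above restores a common guarded-import pattern: when the optional `openshift` client is missing, stub classes are defined so that later subclass definitions still import cleanly, and the module can fail with a clear message at runtime instead of a NameError at import time. A minimal sketch of the same idea (illustrative only, not part of the commit; the error message is made up):

try:
    from openshift.helper.kubernetes import KubernetesObjectHelper
    HAS_K8S_MODULE_HELPER = True
except ImportError:
    class KubernetesObjectHelper(object):   # placeholder so subclassing below still works
        pass
    HAS_K8S_MODULE_HELPER = False


class MyHelper(KubernetesObjectHelper):     # importable even without `openshift` installed
    pass


if not HAS_K8S_MODULE_HELPER:
    raise SystemExit("This module requires the OpenShift Python client.")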
@@ -38,188 +52,52 @@ try:
 except ImportError:
     HAS_YAML = False

-try:
-    import dictdiffer
-    HAS_DICTDIFFER = True
-except ImportError:
-    HAS_DICTDIFFER = False
-
-try:
-    import urllib3
-    urllib3.disable_warnings()
-except ImportError:
+def remove_secret_data(obj_dict):
+    """ Remove any sensitive data from a K8s dict"""
+    if obj_dict.get('data'):
+        # Secret data
+        obj_dict.pop('data')
+    if obj_dict.get('string_data'):
+        # The API should not return sting_data in Secrets, but just in case
+        obj_dict.pop('string_data')
+    if obj_dict['metadata'].get('annotations'):
+        # Remove things like 'openshift.io/token-secret' from metadata
+        for key in [k for k in obj_dict['metadata']['annotations'] if 'secret' in k]:
+            obj_dict['metadata']['annotations'].pop(key)
+
+
+def to_snake(name):
+    """ Convert a string from camel to snake """
+    if not name:
+        return name
+
+    def _replace(m):
+        m = m.group(0)
+        return m[0] + '_' + m[1:]
+
+    p = r'[a-z][A-Z]|' \
+        r'[A-Z]{2}[a-z]'
+    return re.sub(p, _replace, name).lower()
+
+
+class DateTimeEncoder(json.JSONEncoder):
+    # When using json.dumps() with K8s object, pass cls=DateTimeEncoder to handle any datetime objects
+    def default(self, o):
+        if isinstance(o, datetime):
+            return o.isoformat()
+        return json.JSONEncoder.default(self, o)
+
+
+class KubernetesAnsibleModuleHelper(AnsibleMixin, KubernetesObjectHelper):
     pass

-ARG_ATTRIBUTES_BLACKLIST = ('property_path',)
-
-COMMON_ARG_SPEC = {
-    'state': {
-        'default': 'present',
-        'choices': ['present', 'absent'],
-    },
-    'force': {
-        'type': 'bool',
-        'default': False,
-    },
-    'resource_definition': {
-        'type': 'dict',
-        'aliases': ['definition', 'inline']
-    },
-    'src': {
-        'type': 'path',
-    },
-    'kind': {},
-    'name': {},
-    'namespace': {},
-    'api_version': {
-        'default': 'v1',
-        'aliases': ['api', 'version'],
-    },
-}
-
-AUTH_ARG_SPEC = {
-    'kubeconfig': {
-        'type': 'path',
-    },
-    'context': {},
-    'host': {},
-    'api_key': {
-        'no_log': True,
-    },
-    'username': {},
-    'password': {
-        'no_log': True,
-    },
-    'verify_ssl': {
-        'type': 'bool',
-    },
-    'ssl_ca_cert': {
-        'type': 'path',
-    },
-    'cert_file': {
-        'type': 'path',
-    },
-    'key_file': {
-        'type': 'path',
-    },
-}
-
-
-class K8sAnsibleMixin(object):
-    _argspec_cache = None
-
-    @property
-    def argspec(self):
-        """
-        Introspect the model properties, and return an Ansible module arg_spec dict.
-        :return: dict
-        """
-        if self._argspec_cache:
-            return self._argspec_cache
-        argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
-        argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
-        self._argspec_cache = argument_spec
-        return self._argspec_cache
-
-    def get_api_client(self, **auth):
-        auth_args = AUTH_ARG_SPEC.keys()
-
-        auth = auth or getattr(self, 'params', {})
-
-        configuration = kubernetes.client.Configuration()
-        for key, value in iteritems(auth):
-            if key in auth_args and value is not None:
-                if key == 'api_key':
-                    setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
-                else:
-                    setattr(configuration, key, value)
-            elif key in auth_args and value is None:
-                env_value = os.getenv('K8S_AUTH_{0}'.format(key.upper()), None)
-                if env_value is not None:
-                    setattr(configuration, key, env_value)
-
-        kubernetes.client.Configuration.set_default(configuration)
-
-        if auth.get('username') and auth.get('password') and auth.get('host'):
-            auth_method = 'params'
-        elif auth.get('api_key') and auth.get('host'):
-            auth_method = 'params'
-        elif auth.get('kubeconfig') or auth.get('context'):
-            auth_method = 'file'
-        else:
-            auth_method = 'default'
-
-        # First try to do incluster config, then kubeconfig
-        if auth_method == 'default':
-            try:
-                kubernetes.config.load_incluster_config()
-                return DynamicClient(kubernetes.client.ApiClient())
-            except kubernetes.config.ConfigException:
-                return DynamicClient(self.client_from_kubeconfig(auth.get('kubeconfig'), auth.get('context')))
-
-        if auth_method == 'file':
-            return DynamicClient(self.client_from_kubeconfig(auth.get('kubeconfig'), auth.get('context')))
-
-        if auth_method == 'params':
-            return DynamicClient(kubernetes.client.ApiClient(configuration))
-
-    def client_from_kubeconfig(self, config_file, context):
-        try:
-            return kubernetes.config.new_client_from_config(config_file, context)
-        except (IOError, kubernetes.config.ConfigException):
-            # If we failed to load the default config file then we'll return
-            # an empty configuration
-            # If one was specified, we will crash
-            if not config_file:
-                return kubernetes.client.ApiClient()
-            raise
-
-    def remove_aliases(self):
-        """
-        The helper doesn't know what to do with aliased keys
-        """
-        for k, v in iteritems(self.argspec):
-            if 'aliases' in v:
-                for alias in v['aliases']:
-                    if alias in self.params:
-                        self.params.pop(alias)
-
-    def load_resource_definitions(self, src):
-        """ Load the requested src path """
-        result = None
-        path = os.path.normpath(src)
-        if not os.path.exists(path):
-            self.fail_json(msg="Error accessing {0}. Does the file exist?".format(path))
-        try:
-            with open(path, 'r') as f:
-                result = list(yaml.safe_load_all(f))
-        except (IOError, yaml.YAMLError) as exc:
-            self.fail_json(msg="Error loading resource_definition: {0}".format(exc))
-        return result
-
-    @staticmethod
-    def diff_objects(existing, new):
-        if not HAS_DICTDIFFER:
-            return False, []
-
-        def get_shared_attrs(o1, o2):
-            shared_attrs = {}
-            for k, v in o2.items():
-                if isinstance(v, dict):
-                    shared_attrs[k] = get_shared_attrs(o1.get(k, {}), v)
-                else:
-                    shared_attrs[k] = o1.get(k)
-            return shared_attrs
-
-        diffs = list(dictdiffer.diff(new, get_shared_attrs(existing, new)))
-        match = len(diffs) == 0
-        return match, diffs
-
-
-class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
+class KubernetesAnsibleModule(AnsibleModule):
     resource_definition = None
     api_version = None
     kind = None
+    helper = None

     def __init__(self, *args, **kwargs):

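For illustration only (not part of the diff), the two utilities restored in the hunk above behave as follows; the condensed to_snake here collapses the same regex shown above into one expression, and DateTimeEncoder lets json.dumps serialize datetime values from API objects:

import json
import re
from datetime import datetime

def to_snake(name):
    """ Convert a string from camel to snake (same regex as the restored helper). """
    if not name:
        return name
    p = r'[a-z][A-Z]|[A-Z]{2}[a-z]'
    return re.sub(p, lambda m: m.group(0)[0] + '_' + m.group(0)[1:], name).lower()

class DateTimeEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        return json.JSONEncoder.default(self, o)

print(to_snake('podIP'))             # pod_ip
print(to_snake('SELinuxOptions'))    # se_linux_options
print(json.dumps({'creationTimestamp': datetime(2018, 5, 1)}, cls=DateTimeEncoder))
# {"creationTimestamp": "2018-05-01T00:00:00"}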
@@ -232,5 +110,114 @@ class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
         if not HAS_YAML:
             self.fail_json(msg="This module requires PyYAML. Try `pip install PyYAML`")

+        if not HAS_STRING_UTILS:
+            self.fail_json(msg="This module requires Python string utils. Try `pip install python-string-utils`")
+
+    @property
+    def argspec(self):
+        raise NotImplementedError()
+
+    def get_helper(self, api_version, kind):
+        try:
+            helper = KubernetesAnsibleModuleHelper(api_version=api_version, kind=kind, debug=False)
+            helper.get_model(api_version, kind)
+            return helper
+        except KubernetesException as exc:
+            self.fail_json(msg="Error initializing module helper: {0}".format(exc.message))
+
     def execute_module(self):
         raise NotImplementedError()

+    def exit_json(self, **return_attributes):
+        """ Filter any sensitive data that we don't want logged """
+        if return_attributes.get('result') and \
+           return_attributes['result'].get('kind') in ('Secret', 'SecretList'):
+            if return_attributes['result'].get('data'):
+                remove_secret_data(return_attributes['result'])
+            elif return_attributes['result'].get('items'):
+                for item in return_attributes['result']['items']:
+                    remove_secret_data(item)
+        super(KubernetesAnsibleModule, self).exit_json(**return_attributes)
+
+    def authenticate(self):
+        try:
+            auth_options = {}
+            auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
+                         'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')
+            for key, value in iteritems(self.params):
+                if key in auth_args and value is not None:
+                    auth_options[key] = value
+            self.helper.set_client_config(**auth_options)
+        except KubernetesException as e:
+            self.fail_json(msg='Error loading config', error=str(e))
+
+    def remove_aliases(self):
+        """
+        The helper doesn't know what to do with aliased keys
+        """
+        for k, v in iteritems(self.argspec):
+            if 'aliases' in v:
+                for alias in v['aliases']:
+                    if alias in self.params:
+                        self.params.pop(alias)
+
+    def load_resource_definition(self, src):
+        """ Load the requested src path """
+        result = None
+        path = os.path.normpath(src)
+        if not os.path.exists(path):
+            self.fail_json(msg="Error accessing {0}. Does the file exist?".format(path))
+        try:
+            result = yaml.safe_load(open(path, 'r'))
+        except (IOError, yaml.YAMLError) as exc:
+            self.fail_json(msg="Error loading resource_definition: {0}".format(exc))
+        return result
+
+    def resource_to_parameters(self, resource):
+        """ Converts a resource definition to module parameters """
+        parameters = {}
+        for key, value in iteritems(resource):
+            if key in ('apiVersion', 'kind', 'status'):
+                continue
+            elif key == 'metadata' and isinstance(value, dict):
+                for meta_key, meta_value in iteritems(value):
+                    if meta_key in ('name', 'namespace', 'labels', 'annotations'):
+                        parameters[meta_key] = meta_value
+            elif key in self.helper.argspec and value is not None:
+                parameters[key] = value
+            elif isinstance(value, dict):
+                self._add_parameter(value, [to_snake(key)], parameters)
+        return parameters
+
+    def _add_parameter(self, request, path, parameters):
+        for key, value in iteritems(request):
+            if path:
+                param_name = '_'.join(path + [to_snake(key)])
+            else:
+                param_name = to_snake(key)
+            if param_name in self.helper.argspec and value is not None:
+                parameters[param_name] = value
+            elif isinstance(value, dict):
+                continue_path = copy.copy(path) if path else []
+                continue_path.append(to_snake(key))
+                self._add_parameter(value, continue_path, parameters)
+            else:
+                self.fail_json(
+                    msg=("Error parsing resource definition. Encountered {0}, which does not map to a parameter "
+                         "expected by the OpenShift Python module.".format(param_name))
+                )
+
+
+class OpenShiftAnsibleModuleHelper(AnsibleMixin, OpenShiftObjectHelper):
+    pass
+
+
+class OpenShiftAnsibleModuleMixin(object):
+
+    def get_helper(self, api_version, kind):
+        try:
+            helper = OpenShiftAnsibleModuleHelper(api_version=api_version, kind=kind, debug=False)
+            helper.get_model(api_version, kind)
+            return helper
+        except KubernetesException as exc:
+            self.fail_json(msg="Error initializing module helper: {0}".format(exc.message))
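To make the flattening performed by resource_to_parameters() and _add_parameter() above concrete, here is a simplified, hypothetical sketch (not the module code): it underscores-joins nested keys after converting them to snake case, whereas the real methods also special-case metadata keys such as name and namespace and only keep names present in helper.argspec.

import re

def to_snake(name):
    return re.sub(r'[a-z][A-Z]|[A-Z]{2}[a-z]',
                  lambda m: m.group(0)[0] + '_' + m.group(0)[1:], name).lower()

def flatten(request, path=None):
    params = {}
    for key, value in request.items():
        new_path = (path or []) + [to_snake(key)]
        if isinstance(value, dict):
            params.update(flatten(value, new_path))
        else:
            params['_'.join(new_path)] = value
    return params

resource = {'metadata': {'name': 'web', 'namespace': 'default'},
            'spec': {'serviceAccountName': 'builder'}}
print(flatten(resource))
# {'metadata_name': 'web', 'metadata_namespace': 'default',
#  'spec_service_account_name': 'builder'}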
lib/ansible/module_utils/k8s/helper.py (new file, 633 lines)
@@ -0,0 +1,633 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import base64
import copy

from ansible.module_utils.six import iteritems, string_types
from keyword import kwlist

try:
    from openshift.helper import PRIMITIVES
    from openshift.helper.exceptions import KubernetesException
    HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
    HAS_K8S_MODULE_HELPER = False

# TODO Remove string_utils dependency
try:
    import string_utils
    HAS_STRING_UTILS = True
except ImportError:
    HAS_STRING_UTILS = False


ARG_ATTRIBUTES_BLACKLIST = ('property_path',)
PYTHON_KEYWORD_MAPPING = dict(zip(['_{0}'.format(item) for item in kwlist], kwlist))
PYTHON_KEYWORD_MAPPING.update(dict([reversed(item) for item in iteritems(PYTHON_KEYWORD_MAPPING)]))

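For illustration (not part of the file), the keyword mapping built above is bidirectional, so reserved Python names can be translated in either direction when mapping module parameters onto model attributes:

from keyword import kwlist

def iteritems(d):   # stand-in for ansible.module_utils.six.iteritems used above
    return d.items()

PYTHON_KEYWORD_MAPPING = dict(zip(['_{0}'.format(item) for item in kwlist], kwlist))
PYTHON_KEYWORD_MAPPING.update(dict([reversed(item) for item in iteritems(PYTHON_KEYWORD_MAPPING)]))

print(PYTHON_KEYWORD_MAPPING['_from'])    # from
print(PYTHON_KEYWORD_MAPPING['return'])   # _return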
|
COMMON_ARG_SPEC = {
|
||||||
|
'state': {
|
||||||
|
'default': 'present',
|
||||||
|
'choices': ['present', 'absent'],
|
||||||
|
},
|
||||||
|
'force': {
|
||||||
|
'type': 'bool',
|
||||||
|
'default': False,
|
||||||
|
},
|
||||||
|
'resource_definition': {
|
||||||
|
'type': 'dict',
|
||||||
|
'aliases': ['definition', 'inline']
|
||||||
|
},
|
||||||
|
'src': {
|
||||||
|
'type': 'path',
|
||||||
|
},
|
||||||
|
'kind': {},
|
||||||
|
'name': {},
|
||||||
|
'namespace': {},
|
||||||
|
'api_version': {
|
||||||
|
'default': 'v1',
|
||||||
|
'aliases': ['api', 'version'],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
AUTH_ARG_SPEC = {
|
||||||
|
'kubeconfig': {
|
||||||
|
'type': 'path',
|
||||||
|
},
|
||||||
|
'context': {},
|
||||||
|
'host': {},
|
||||||
|
'api_key': {
|
||||||
|
'no_log': True,
|
||||||
|
},
|
||||||
|
'username': {},
|
||||||
|
'password': {
|
||||||
|
'no_log': True,
|
||||||
|
},
|
||||||
|
'verify_ssl': {
|
||||||
|
'type': 'bool',
|
||||||
|
},
|
||||||
|
'ssl_ca_cert': {
|
||||||
|
'type': 'path',
|
||||||
|
},
|
||||||
|
'cert_file': {
|
||||||
|
'type': 'path',
|
||||||
|
},
|
||||||
|
'key_file': {
|
||||||
|
'type': 'path',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
OPENSHIFT_ARG_SPEC = {
|
||||||
|
'description': {},
|
||||||
|
'display_name': {},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleMixin(object):
|
||||||
|
_argspec_cache = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def argspec(self):
|
||||||
|
"""
|
||||||
|
Introspect the model properties, and return an Ansible module arg_spec dict.
|
||||||
|
:return: dict
|
||||||
|
"""
|
||||||
|
if self._argspec_cache:
|
||||||
|
return self._argspec_cache
|
||||||
|
argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
|
||||||
|
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
|
||||||
|
argument_spec.update(copy.deepcopy(OPENSHIFT_ARG_SPEC))
|
||||||
|
argument_spec.update(self.__transform_properties(self.properties))
|
||||||
|
self._argspec_cache = argument_spec
|
||||||
|
return self._argspec_cache
|
||||||
|
|
||||||
|
def object_from_params(self, module_params, obj=None):
|
||||||
|
"""
|
||||||
|
Update a model object with Ansible module param values. Optionally pass an object
|
||||||
|
to update, otherwise a new object will be created.
|
||||||
|
:param module_params: dict of key:value pairs
|
||||||
|
:param obj: model object to update
|
||||||
|
:return: updated model object
|
||||||
|
"""
|
||||||
|
if not obj:
|
||||||
|
obj = self.model()
|
||||||
|
obj.kind = string_utils.snake_case_to_camel(self.kind, upper_case_first=False)
|
||||||
|
obj.api_version = self.api_version.lower()
|
||||||
|
for param_name, param_value in iteritems(module_params):
|
||||||
|
spec = self.find_arg_spec(param_name)
|
||||||
|
if param_value is not None and spec.get('property_path'):
|
||||||
|
prop_path = copy.copy(spec['property_path'])
|
||||||
|
self.__set_obj_attribute(obj, prop_path, param_value, param_name)
|
||||||
|
|
||||||
|
if self.kind.lower() == 'project' and (module_params.get('display_name') or
|
||||||
|
module_params.get('description')):
|
||||||
|
if not obj.metadata.annotations:
|
||||||
|
obj.metadata.annotations = {}
|
||||||
|
if module_params.get('display_name'):
|
||||||
|
obj.metadata.annotations['openshift.io/display-name'] = module_params['display_name']
|
||||||
|
if module_params.get('description'):
|
||||||
|
obj.metadata.annotations['openshift.io/description'] = module_params['description']
|
||||||
|
elif (self.kind.lower() == 'secret' and getattr(obj, 'string_data', None)
|
||||||
|
and hasattr(obj, 'data')):
|
||||||
|
if obj.data is None:
|
||||||
|
obj.data = {}
|
||||||
|
|
||||||
|
# Do a base64 conversion of `string_data` and place it in
|
||||||
|
# `data` so that later comparisons to existing objects
|
||||||
|
# (if any) do not result in requiring an unnecessary change.
|
||||||
|
for key, value in iteritems(obj.string_data):
|
||||||
|
obj.data[key] = base64.b64encode(value)
|
||||||
|
|
||||||
|
obj.string_data = None
|
||||||
|
return obj
|
||||||
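A small illustration of the string_data conversion performed in object_from_params() above (illustrative only; the module code, written for the Python 2 era, passes the value straight to base64.b64encode, while on Python 3 it would need to be encoded to bytes first):

import base64

string_data = {'username': 'admin'}
data = {k: base64.b64encode(v.encode('utf-8')).decode('ascii')
        for k, v in string_data.items()}
print(data)   # {'username': 'YWRtaW4='}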
|
|
||||||
|
def request_body_from_params(self, module_params):
|
||||||
|
request = {
|
||||||
|
'kind': self.base_model_name,
|
||||||
|
}
|
||||||
|
for param_name, param_value in iteritems(module_params):
|
||||||
|
spec = self.find_arg_spec(param_name)
|
||||||
|
if spec and spec.get('property_path') and param_value is not None:
|
||||||
|
self.__add_path_to_dict(request, param_name, param_value, spec['property_path'])
|
||||||
|
|
||||||
|
if self.kind.lower() == 'project' and (module_params.get('display_name') or
|
||||||
|
module_params.get('description')):
|
||||||
|
if not request.get('metadata'):
|
||||||
|
request['metadata'] = {}
|
||||||
|
if not request['metadata'].get('annotations'):
|
||||||
|
request['metadata']['annotations'] = {}
|
||||||
|
if module_params.get('display_name'):
|
||||||
|
request['metadata']['annotations']['openshift.io/display-name'] = module_params['display_name']
|
||||||
|
if module_params.get('description'):
|
||||||
|
request['metadata']['annotations']['openshift.io/description'] = module_params['description']
|
||||||
|
return request
|
||||||
|
|
||||||
|
def find_arg_spec(self, module_param_name):
|
||||||
|
"""For testing, allow the param_name value to be an alias"""
|
||||||
|
if module_param_name in self.argspec:
|
||||||
|
return self.argspec[module_param_name]
|
||||||
|
result = None
|
||||||
|
for key, value in iteritems(self.argspec):
|
||||||
|
if value.get('aliases'):
|
||||||
|
for alias in value['aliases']:
|
||||||
|
if alias == module_param_name:
|
||||||
|
result = self.argspec[key]
|
||||||
|
break
|
||||||
|
if result:
|
||||||
|
break
|
||||||
|
if not result:
|
||||||
|
raise KubernetesException(
|
||||||
|
"Error: received unrecognized module parameter {0}".format(module_param_name)
|
||||||
|
)
|
||||||
|
return result
|
||||||
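The alias lookup in find_arg_spec() above can be sketched in a few lines (hypothetical, simplified argspec; the real method also raises KubernetesException rather than KeyError):

argspec = {'api_version': {'default': 'v1', 'aliases': ['api', 'version']}}

def find_arg_spec(name):
    if name in argspec:
        return argspec[name]
    for key, value in argspec.items():
        if name in value.get('aliases', []):
            return argspec[key]
    raise KeyError("Error: received unrecognized module parameter {0}".format(name))

print(find_arg_spec('version'))   # {'default': 'v1', 'aliases': ['api', 'version']}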
|
|
||||||
|
@staticmethod
|
||||||
|
def __convert_params_to_choices(properties):
|
||||||
|
def snake_case(name):
|
||||||
|
result = string_utils.snake_case_to_camel(name.replace('_params', ''), upper_case_first=True)
|
||||||
|
return result[:1].upper() + result[1:]
|
||||||
|
choices = {}
|
||||||
|
for x in list(properties.keys()):
|
||||||
|
if x.endswith('params'):
|
||||||
|
choices[x] = snake_case(x)
|
||||||
|
return choices
|
||||||
|
|
||||||
|
def __add_path_to_dict(self, request_dict, param_name, param_value, path):
|
||||||
|
local_path = copy.copy(path)
|
||||||
|
spec = self.find_arg_spec(param_name)
|
||||||
|
while len(local_path):
|
||||||
|
p = string_utils.snake_case_to_camel(local_path.pop(0), upper_case_first=False)
|
||||||
|
if len(local_path):
|
||||||
|
if request_dict.get(p, None) is None:
|
||||||
|
request_dict[p] = {}
|
||||||
|
self.__add_path_to_dict(request_dict[p], param_name, param_value, local_path)
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
param_type = spec.get('type', 'str')
|
||||||
|
if param_type == 'dict':
|
||||||
|
request_dict[p] = self.__dict_keys_to_camel(param_name, param_value)
|
||||||
|
elif param_type == 'list':
|
||||||
|
request_dict[p] = self.__list_keys_to_camel(param_name, param_value)
|
||||||
|
else:
|
||||||
|
request_dict[p] = param_value
|
||||||
|
|
||||||
|
def __dict_keys_to_camel(self, param_name, param_dict):
|
||||||
|
result = {}
|
||||||
|
for item, value in iteritems(param_dict):
|
||||||
|
key_name = self.__property_name_to_camel(param_name, item)
|
||||||
|
if value:
|
||||||
|
if isinstance(value, list):
|
||||||
|
result[key_name] = self.__list_keys_to_camel(param_name, value)
|
||||||
|
elif isinstance(value, dict):
|
||||||
|
result[key_name] = self.__dict_keys_to_camel(param_name, value)
|
||||||
|
else:
|
||||||
|
result[key_name] = value
|
||||||
|
return result
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def __property_name_to_camel(param_name, property_name):
|
||||||
|
new_name = property_name
|
||||||
|
if 'annotations' not in param_name and 'labels' not in param_name and 'selector' not in param_name:
|
||||||
|
camel_name = string_utils.snake_case_to_camel(property_name, upper_case_first=False)
|
||||||
|
new_name = camel_name[1:] if camel_name.startswith('_') else camel_name
|
||||||
|
return new_name
|
||||||
|
|
||||||
|
def __list_keys_to_camel(self, param_name, param_list):
|
||||||
|
result = []
|
||||||
|
if isinstance(param_list[0], dict):
|
||||||
|
for item in param_list:
|
||||||
|
result.append(self.__dict_keys_to_camel(param_name, item))
|
||||||
|
else:
|
||||||
|
result = param_list
|
||||||
|
return result
|
||||||
|
|
||||||
|
def __set_obj_attribute(self, obj, property_path, param_value, param_name):
|
||||||
|
"""
|
||||||
|
Recursively set object properties
|
||||||
|
:param obj: The object on which to set a property value.
|
||||||
|
:param property_path: A list of property names in the form of strings.
|
||||||
|
:param param_value: The value to set.
|
||||||
|
:return: The original object.
|
||||||
|
"""
|
||||||
|
while len(property_path) > 0:
|
||||||
|
raw_prop_name = property_path.pop(0)
|
||||||
|
prop_name = PYTHON_KEYWORD_MAPPING.get(raw_prop_name, raw_prop_name)
|
||||||
|
prop_kind = obj.swagger_types[prop_name]
|
||||||
|
if prop_kind in PRIMITIVES:
|
||||||
|
try:
|
||||||
|
setattr(obj, prop_name, param_value)
|
||||||
|
except ValueError as exc:
|
||||||
|
msg = str(exc)
|
||||||
|
if param_value is None and 'None' in msg:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise KubernetesException(
|
||||||
|
"Error setting {0} to {1}: {2}".format(prop_name, param_value, msg)
|
||||||
|
)
|
||||||
|
elif prop_kind.startswith('dict('):
|
||||||
|
if not getattr(obj, prop_name):
|
||||||
|
setattr(obj, prop_name, param_value)
|
||||||
|
else:
|
||||||
|
self.__compare_dict(getattr(obj, prop_name), param_value, param_name)
|
||||||
|
elif prop_kind.startswith('list['):
|
||||||
|
if getattr(obj, prop_name) is None:
|
||||||
|
setattr(obj, prop_name, [])
|
||||||
|
obj_type = prop_kind.replace('list[', '').replace(']', '')
|
||||||
|
if obj_type not in PRIMITIVES and obj_type not in ('list', 'dict'):
|
||||||
|
self.__compare_obj_list(getattr(obj, prop_name), param_value, obj_type, param_name)
|
||||||
|
else:
|
||||||
|
self.__compare_list(getattr(obj, prop_name), param_value, param_name)
|
||||||
|
else:
|
||||||
|
# prop_kind is an object class
|
||||||
|
sub_obj = getattr(obj, prop_name)
|
||||||
|
if not sub_obj:
|
||||||
|
sub_obj = self.model_class_from_name(prop_kind)()
|
||||||
|
setattr(obj, prop_name, self.__set_obj_attribute(sub_obj, property_path, param_value, param_name))
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def __compare_list(self, src_values, request_values, param_name):
|
||||||
|
"""
|
||||||
|
Compare src_values list with request_values list, and append any missing
|
||||||
|
request_values to src_values.
|
||||||
|
"""
|
||||||
|
if not request_values:
|
||||||
|
return
|
||||||
|
|
||||||
|
if not src_values:
|
||||||
|
src_values += request_values
|
||||||
|
|
||||||
|
if type(src_values[0]).__name__ in PRIMITIVES:
|
||||||
|
if set(src_values) >= set(request_values):
|
||||||
|
# src_value list includes request_value list
|
||||||
|
return
|
||||||
|
# append the missing elements from request value
|
||||||
|
src_values += list(set(request_values) - set(src_values))
|
||||||
|
elif type(src_values[0]).__name__ == 'dict':
|
||||||
|
missing = []
|
||||||
|
for request_dict in request_values:
|
||||||
|
match = False
|
||||||
|
for src_dict in src_values:
|
||||||
|
if '__cmp__' in dir(src_dict):
|
||||||
|
# python < 3
|
||||||
|
if src_dict >= request_dict:
|
||||||
|
match = True
|
||||||
|
break
|
||||||
|
elif iteritems(src_dict) == iteritems(request_dict):
|
||||||
|
# python >= 3
|
||||||
|
match = True
|
||||||
|
break
|
||||||
|
if not match:
|
||||||
|
missing.append(request_dict)
|
||||||
|
src_values += missing
|
||||||
|
elif type(src_values[0]).__name__ == 'list':
|
||||||
|
missing = []
|
||||||
|
for request_list in request_values:
|
||||||
|
match = False
|
||||||
|
for src_list in src_values:
|
||||||
|
if set(request_list) >= set(src_list):
|
||||||
|
match = True
|
||||||
|
break
|
||||||
|
if not match:
|
||||||
|
missing.append(request_list)
|
||||||
|
src_values += missing
|
||||||
|
else:
|
||||||
|
raise KubernetesException(
|
||||||
|
"Evaluating {0}: encountered unimplemented type {1} in "
|
||||||
|
"__compare_list()".format(param_name, type(src_values[0]).__name__)
|
||||||
|
)
|
||||||
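To make the set arithmetic in the primitive branch of __compare_list() above concrete (illustrative values only): entries from the request that are missing from the source list are appended, and nothing already present is removed or reordered.

src_values = ['get', 'list']
request_values = ['list', 'watch']

if not set(src_values) >= set(request_values):
    # append only the missing elements from the request
    src_values += list(set(request_values) - set(src_values))

print(src_values)   # ['get', 'list', 'watch']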
|
|
||||||
|
def __compare_dict(self, src_value, request_value, param_name):
|
||||||
|
"""
|
||||||
|
Compare src_value dict with request_value dict, and update src_value with any differences.
|
||||||
|
Does not remove items from src_value dict.
|
||||||
|
"""
|
||||||
|
if not request_value:
|
||||||
|
return
|
||||||
|
for item, value in iteritems(request_value):
|
||||||
|
if type(value).__name__ in ('str', 'int', 'bool'):
|
||||||
|
src_value[item] = value
|
||||||
|
elif type(value).__name__ == 'list':
|
||||||
|
self.__compare_list(src_value[item], value, param_name)
|
||||||
|
elif type(value).__name__ == 'dict':
|
||||||
|
self.__compare_dict(src_value[item], value, param_name)
|
||||||
|
else:
|
||||||
|
raise KubernetesException(
|
||||||
|
"Evaluating {0}: encountered unimplemented type {1} in "
|
||||||
|
"__compare_dict()".format(param_name, type(value).__name__)
|
||||||
|
)
|
||||||
|
|
||||||
|
def __compare_obj_list(self, src_value, request_value, obj_class, param_name):
|
||||||
|
"""
|
||||||
|
Compare a src_value (list of ojects) with a request_value (list of dicts), and update
|
||||||
|
src_value with differences. Assumes each object and each dict has a 'name' attributes,
|
||||||
|
which can be used for matching. Elements are not removed from the src_value list.
|
||||||
|
"""
|
||||||
|
if not request_value:
|
||||||
|
return
|
||||||
|
|
||||||
|
model_class = self.model_class_from_name(obj_class)
|
||||||
|
|
||||||
|
# Try to determine the unique key for the array
|
||||||
|
key_names = [
|
||||||
|
'name',
|
||||||
|
'type'
|
||||||
|
]
|
||||||
|
key_name = None
|
||||||
|
for key in key_names:
|
||||||
|
if hasattr(model_class, key):
|
||||||
|
key_name = key
|
||||||
|
break
|
||||||
|
|
||||||
|
if key_name:
|
||||||
|
# If the key doesn't exist in the request values, then ignore it, rather than throwing an error
|
||||||
|
for item in request_value:
|
||||||
|
if not item.get(key_name):
|
||||||
|
key_name = None
|
||||||
|
break
|
||||||
|
|
||||||
|
if key_name:
|
||||||
|
# compare by key field
|
||||||
|
for item in request_value:
|
||||||
|
if not item.get(key_name):
|
||||||
|
# Prevent user from creating something that will be impossible to patch or update later
|
||||||
|
raise KubernetesException(
|
||||||
|
"Evaluating {0} - expecting parameter {1} to contain a `{2}` attribute "
|
||||||
|
"in __compare_obj_list().".format(param_name,
|
||||||
|
self.get_base_model_name_snake(obj_class),
|
||||||
|
key_name)
|
||||||
|
)
|
||||||
|
found = False
|
||||||
|
for obj in src_value:
|
||||||
|
if not obj:
|
||||||
|
continue
|
||||||
|
if getattr(obj, key_name) == item[key_name]:
|
||||||
|
# Assuming both the src_value and the request value include a name property
|
||||||
|
found = True
|
||||||
|
for key, value in iteritems(item):
|
||||||
|
snake_key = self.attribute_to_snake(key)
|
||||||
|
item_kind = model_class.swagger_types.get(snake_key)
|
||||||
|
if item_kind and item_kind in PRIMITIVES or type(value).__name__ in PRIMITIVES:
|
||||||
|
setattr(obj, snake_key, value)
|
||||||
|
elif item_kind and item_kind.startswith('list['):
|
||||||
|
obj_type = item_kind.replace('list[', '').replace(']', '')
|
||||||
|
if getattr(obj, snake_key) is None:
|
||||||
|
setattr(obj, snake_key, [])
|
||||||
|
if obj_type not in ('str', 'int', 'bool', 'object'):
|
||||||
|
self.__compare_obj_list(getattr(obj, snake_key), value, obj_type, param_name)
|
||||||
|
else:
|
||||||
|
# Straight list comparison
|
||||||
|
self.__compare_list(getattr(obj, snake_key), value, param_name)
|
||||||
|
elif item_kind and item_kind.startswith('dict('):
|
||||||
|
self.__compare_dict(getattr(obj, snake_key), value, param_name)
|
||||||
|
elif item_kind and type(value).__name__ == 'dict':
|
||||||
|
# object
|
||||||
|
param_obj = getattr(obj, snake_key)
|
||||||
|
if not param_obj:
|
||||||
|
setattr(obj, snake_key, self.model_class_from_name(item_kind)())
|
||||||
|
param_obj = getattr(obj, snake_key)
|
||||||
|
self.__update_object_properties(param_obj, value)
|
||||||
|
else:
|
||||||
|
if item_kind:
|
||||||
|
raise KubernetesException(
|
||||||
|
"Evaluating {0}: encountered unimplemented type {1} in "
|
||||||
|
"__compare_obj_list() for model {2}".format(
|
||||||
|
param_name,
|
||||||
|
item_kind,
|
||||||
|
self.get_base_model_name_snake(obj_class))
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise KubernetesException(
|
||||||
|
"Evaluating {0}: unable to get swagger_type for {1} in "
|
||||||
|
"__compare_obj_list() for item {2} in model {3}".format(
|
||||||
|
param_name,
|
||||||
|
snake_key,
|
||||||
|
str(item),
|
||||||
|
self.get_base_model_name_snake(obj_class))
|
||||||
|
)
|
||||||
|
if not found:
|
||||||
|
# Requested item not found. Adding.
|
||||||
|
obj = self.model_class_from_name(obj_class)(**item)
|
||||||
|
src_value.append(obj)
|
||||||
|
else:
|
||||||
|
# There isn't a key, or we don't know what it is, so check for all properties to match
|
||||||
|
for item in request_value:
|
||||||
|
found = False
|
||||||
|
for obj in src_value:
|
||||||
|
match = True
|
||||||
|
for item_key, item_value in iteritems(item):
|
||||||
|
# TODO: this should probably take the property type into account
|
||||||
|
snake_key = self.attribute_to_snake(item_key)
|
||||||
|
if getattr(obj, snake_key) != item_value:
|
||||||
|
match = False
|
||||||
|
break
|
||||||
|
if match:
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
if not found:
|
||||||
|
obj = self.model_class_from_name(obj_class)(**item)
|
||||||
|
src_value.append(obj)
|
||||||
|
|
||||||
|
def __update_object_properties(self, obj, item):
|
||||||
|
""" Recursively update an object's properties. Returns a pointer to the object. """
|
||||||
|
|
||||||
|
for key, value in iteritems(item):
|
||||||
|
snake_key = self.attribute_to_snake(key)
|
||||||
|
try:
|
||||||
|
kind = obj.swagger_types[snake_key]
|
||||||
|
except (AttributeError, KeyError):
|
||||||
|
possible_matches = ', '.join(list(obj.swagger_types.keys()))
|
||||||
|
class_snake_name = self.get_base_model_name_snake(type(obj).__name__)
|
||||||
|
raise KubernetesException(
|
||||||
|
"Unable to find '{0}' in {1}. Valid property names include: {2}".format(snake_key,
|
||||||
|
class_snake_name,
|
||||||
|
possible_matches)
|
||||||
|
)
|
||||||
|
if kind in PRIMITIVES or kind.startswith('list[') or kind.startswith('dict('):
|
||||||
|
self.__set_obj_attribute(obj, [snake_key], value, snake_key)
|
||||||
|
else:
|
||||||
|
# kind is an object, hopefully
|
||||||
|
if not getattr(obj, snake_key):
|
||||||
|
setattr(obj, snake_key, self.model_class_from_name(kind)())
|
||||||
|
self.__update_object_properties(getattr(obj, snake_key), value)
|
||||||
|
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def __transform_properties(self, properties, prefix='', path=None, alternate_prefix=''):
|
||||||
|
"""
|
||||||
|
Convert a list of properties to an argument_spec dictionary
|
||||||
|
|
||||||
|
:param properties: List of properties from self.properties_from_model_class()
|
||||||
|
:param prefix: String to prefix to argument names.
|
||||||
|
:param path: List of property names providing the recursive path through the model to the property
|
||||||
|
:param alternate_prefix: a more minimal version of prefix
|
||||||
|
:return: dict
|
||||||
|
"""
|
||||||
|
primitive_types = list(PRIMITIVES) + ['list', 'dict']
|
||||||
|
args = {}
|
||||||
|
|
||||||
|
if path is None:
|
||||||
|
path = []
|
||||||
|
|
||||||
|
def add_meta(prop_name, prop_prefix, prop_alt_prefix):
|
||||||
|
""" Adds metadata properties to the argspec """
|
||||||
|
# if prop_alt_prefix != prop_prefix:
|
||||||
|
# if prop_alt_prefix:
|
||||||
|
# args[prop_prefix + prop_name]['aliases'] = [prop_alt_prefix + prop_name]
|
||||||
|
# elif prop_prefix:
|
||||||
|
# args[prop_prefix + prop_name]['aliases'] = [prop_name]
|
||||||
|
prop_paths = copy.copy(path) # copy path from outer scope
|
||||||
|
prop_paths.append('metadata')
|
||||||
|
prop_paths.append(prop_name)
|
||||||
|
args[prop_prefix + prop_name]['property_path'] = prop_paths
|
||||||
|
|
||||||
|
for raw_prop, prop_attributes in iteritems(properties):
|
||||||
|
prop = PYTHON_KEYWORD_MAPPING.get(raw_prop, raw_prop)
|
||||||
|
if prop in ('api_version', 'status', 'kind', 'items') and not prefix:
|
||||||
|
# Don't expose these properties
|
||||||
|
continue
|
||||||
|
elif prop_attributes['immutable']:
|
||||||
|
# Property cannot be set by the user
|
||||||
|
continue
|
||||||
|
elif prop == 'metadata' and prop_attributes['class'].__name__ == 'UnversionedListMeta':
|
||||||
|
args['namespace'] = {}
|
||||||
|
elif prop == 'metadata' and prop_attributes['class'].__name__ != 'UnversionedListMeta':
|
||||||
|
meta_prefix = prefix + '_metadata_' if prefix else ''
|
||||||
|
meta_alt_prefix = alternate_prefix + '_metadata_' if alternate_prefix else ''
|
||||||
|
if meta_prefix and not meta_alt_prefix:
|
||||||
|
meta_alt_prefix = meta_prefix
|
||||||
|
if 'labels' in dir(prop_attributes['class']):
|
||||||
|
args[meta_prefix + 'labels'] = {
|
||||||
|
'type': 'dict',
|
||||||
|
}
|
||||||
|
add_meta('labels', meta_prefix, meta_alt_prefix)
|
||||||
|
if 'annotations' in dir(prop_attributes['class']):
|
||||||
|
args[meta_prefix + 'annotations'] = {
|
||||||
|
'type': 'dict',
|
||||||
|
}
|
||||||
|
add_meta('annotations', meta_prefix, meta_alt_prefix)
|
||||||
|
if 'namespace' in dir(prop_attributes['class']):
|
||||||
|
args[meta_prefix + 'namespace'] = {}
|
||||||
|
add_meta('namespace', meta_prefix, meta_alt_prefix)
|
||||||
|
if 'name' in dir(prop_attributes['class']):
|
||||||
|
args[meta_prefix + 'name'] = {}
|
||||||
|
add_meta('name', meta_prefix, meta_alt_prefix)
|
||||||
|
elif prop_attributes['class'].__name__ not in primitive_types and not prop.endswith('params'):
|
||||||
|
# Adds nested properties recursively
|
||||||
|
|
||||||
|
label = prop
|
||||||
|
|
||||||
|
# Provide a more human-friendly version of the prefix
|
||||||
|
alternate_label = label\
|
||||||
|
.replace('spec', '')\
|
||||||
|
.replace('template', '')\
|
||||||
|
.replace('config', '')
|
||||||
|
|
||||||
|
p = prefix
|
||||||
|
p += '_' + label if p else label
|
||||||
|
a = alternate_prefix
|
||||||
|
paths = copy.copy(path)
|
||||||
|
paths.append(prop)
|
||||||
|
|
||||||
|
# if alternate_prefix:
|
||||||
|
# # Prevent the last prefix from repeating. In other words, avoid things like 'pod_pod'
|
||||||
|
# pieces = alternate_prefix.split('_')
|
||||||
|
# alternate_label = alternate_label.replace(pieces[len(pieces) - 1] + '_', '', 1)
|
||||||
|
# if alternate_label != self.base_model_name and alternate_label not in a:
|
||||||
|
# a += '_' + alternate_label if a else alternate_label
|
||||||
|
if prop.endswith('params') and 'type' in properties:
|
||||||
|
sub_props = dict()
|
||||||
|
sub_props[prop] = {
|
||||||
|
'class': dict,
|
||||||
|
'immutable': False
|
||||||
|
}
|
||||||
|
args.update(self.__transform_properties(sub_props, prefix=p, path=paths, alternate_prefix=a))
|
||||||
|
else:
|
||||||
|
sub_props = self.properties_from_model_class(prop_attributes['class'])
|
||||||
|
args.update(self.__transform_properties(sub_props, prefix=p, path=paths, alternate_prefix=a))
|
||||||
|
else:
|
||||||
|
# Adds a primitive property
|
||||||
|
arg_prefix = prefix + '_' if prefix else ''
|
||||||
|
arg_alt_prefix = alternate_prefix + '_' if alternate_prefix else ''
|
||||||
|
paths = copy.copy(path)
|
||||||
|
paths.append(prop)
|
||||||
|
|
||||||
|
property_type = prop_attributes['class'].__name__
|
||||||
|
if property_type == 'object':
|
||||||
|
property_type = 'str'
|
||||||
|
|
||||||
|
args[arg_prefix + prop] = {
|
||||||
|
'required': False,
|
||||||
|
'type': property_type,
|
||||||
|
'property_path': paths
|
||||||
|
}
|
||||||
|
|
||||||
|
if prop.endswith('params') and 'type' in properties:
|
||||||
|
args[arg_prefix + prop]['type'] = 'dict'
|
||||||
|
|
||||||
|
# Use the alternate prefix to construct a human-friendly alias
|
||||||
|
if arg_alt_prefix and arg_prefix != arg_alt_prefix:
|
||||||
|
args[arg_prefix + prop]['aliases'] = [arg_alt_prefix + prop]
|
||||||
|
elif arg_prefix:
|
||||||
|
args[arg_prefix + prop]['aliases'] = [prop]
|
||||||
|
|
||||||
|
if prop == 'type':
|
||||||
|
choices = self.__convert_params_to_choices(properties)
|
||||||
|
if len(choices) > 0:
|
||||||
|
args[arg_prefix + prop]['choices'] = choices
|
||||||
|
return args
|
@@ -18,24 +18,22 @@
 from __future__ import absolute_import, division, print_function

-from ansible.module_utils.k8s.common import K8sAnsibleMixin, HAS_K8S_MODULE_HELPER
+from ansible.module_utils.six import iteritems

 try:
-    from ansible.errors import AnsibleError
-except ImportError:
-    AnsibleError = Exception
-
-try:
-    from openshift.dynamic.exceptions import DynamicApiError
-except ImportError:
-    pass
+    from openshift.helper.kubernetes import KubernetesObjectHelper
+    from openshift.helper.openshift import OpenShiftObjectHelper
+    from openshift.helper.exceptions import KubernetesException
+    HAS_K8S_MODULE_HELPER = True
+except ImportError as exc:
+    HAS_K8S_MODULE_HELPER = False


 class K8sInventoryException(Exception):
     pass


-class K8sInventoryHelper(K8sAnsibleMixin):
+class K8sInventoryHelper(object):
     helper = None
     transport = 'kubectl'

@@ -58,7 +56,7 @@ class K8sInventoryHelper(K8sAnsibleMixin):
         self.fetch_objects(connections)

     def fetch_objects(self, connections):
-        client = self.get_api_client()
+        self.helper = self.get_helper('v1', 'namespace_list')

         if connections:
             if not isinstance(connections, list):
|
@@ -67,50 +65,68 @@ class K8sInventoryHelper(K8sAnsibleMixin):
|
||||||
for connection in connections:
|
for connection in connections:
|
||||||
if not isinstance(connection, dict):
|
if not isinstance(connection, dict):
|
||||||
raise K8sInventoryException("Expecting connection to be a dictionary.")
|
raise K8sInventoryException("Expecting connection to be a dictionary.")
|
||||||
client = self.get_api_client(**connection)
|
self.authenticate(connection)
|
||||||
name = connection.get('name', self.get_default_host_name(client.configuration.host))
|
name = connection.get('name', self.get_default_host_name(self.helper.api_client.host))
|
||||||
if connection.get('namespaces'):
|
if connection.get('namespaces'):
|
||||||
namespaces = connections['namespaces']
|
namespaces = connections['namespaces']
|
||||||
else:
|
else:
|
||||||
namespaces = self.get_available_namespaces(client)
|
namespaces = self.get_available_namespaces()
|
||||||
for namespace in namespaces:
|
for namespace in namespaces:
|
||||||
self.get_pods_for_namespace(client, name, namespace)
|
self.get_pods_for_namespace(name, namespace)
|
||||||
self.get_services_for_namespace(client, name, namespace)
|
self.get_services_for_namespace(name, namespace)
|
||||||
else:
|
else:
|
||||||
name = self.get_default_host_name(client.configuration.host)
|
name = self.get_default_host_name(self.helper.api_client.host)
|
||||||
namespaces = self.get_available_namespaces(client)
|
namespaces = self.get_available_namespaces()
|
||||||
for namespace in namespaces:
|
for namespace in namespaces:
|
||||||
self.get_pods_for_namespace(client, name, namespace)
|
self.get_pods_for_namespace(name, namespace)
|
||||||
self.get_services_for_namespace(client, name, namespace)
|
self.get_services_for_namespace(name, namespace)
|
||||||
|
|
||||||
|
def authenticate(self, connection=None):
|
||||||
|
auth_options = {}
|
||||||
|
if connection:
|
||||||
|
auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
|
||||||
|
'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')
|
||||||
|
for key, value in iteritems(connection):
|
||||||
|
if key in auth_args and value is not None:
|
||||||
|
auth_options[key] = value
|
||||||
|
try:
|
||||||
|
self.helper.set_client_config(**auth_options)
|
||||||
|
except KubernetesException as exc:
|
||||||
|
raise K8sInventoryException('Error connecting to the API: {0}'.format(exc.message))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_default_host_name(host):
|
def get_default_host_name(host):
|
||||||
return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')
|
return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')
|
||||||
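As a quick illustration of the host-name normalization shown above (not part of the diff), the cluster API URL is flattened into a string that is safe to use as an inventory group name:

def get_default_host_name(host):
    return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')

print(get_default_host_name('https://api.example.com:6443'))
# api-example-com_6443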
|
|
||||||
def get_available_namespaces(self, client):
|
def get_helper(self, api_version, kind):
|
||||||
v1_namespace = client.resources.get(api_version='v1', kind='Namespace')
|
|
||||||
try:
|
try:
|
||||||
obj = v1_namespace.get()
|
helper = KubernetesObjectHelper(api_version=api_version, kind=kind, debug=False)
|
||||||
except DynamicApiError as exc:
|
helper.get_model(api_version, kind)
|
||||||
|
return helper
|
||||||
|
except KubernetesException as exc:
|
||||||
|
raise K8sInventoryException('Error initializing object helper: {0}'.format(exc.message))
|
||||||
|
|
||||||
|
def get_available_namespaces(self):
|
||||||
|
try:
|
||||||
|
obj = self.helper.get_object()
|
||||||
|
except KubernetesObjectHelper as exc:
|
||||||
raise K8sInventoryException('Error fetching Namespace list: {0}'.format(exc.message))
|
raise K8sInventoryException('Error fetching Namespace list: {0}'.format(exc.message))
|
||||||
return [namespace.metadata.name for namespace in obj.items]
|
return [namespace.metadata.name for namespace in obj.items]
|
||||||
|
|
||||||
def get_pods_for_namespace(self, client, name, namespace):
|
def get_pods_for_namespace(self, name, namespace):
|
||||||
v1_pod = client.resources.get(api_version='v1', kind='Pod')
|
self.helper.set_model('v1', 'pod_list')
|
||||||
try:
|
try:
|
||||||
obj = v1_pod.get(namespace=namespace)
|
obj = self.helper.get_object(namespace=namespace)
|
||||||
except DynamicApiError as exc:
|
except KubernetesException as exc:
|
||||||
raise K8sInventoryException('Error fetching Pod list: {0}'.format(exc.message))
|
raise K8sInventoryException('Error fetching Pod list: {0}'.format(exc.message))
|
||||||
|
|
||||||
namespace_group = 'namespace_{0}'.format(namespace)
|
namespace_pod_group = '{0}_pods'.format(namespace)
|
||||||
namespace_pods_group = '{0}_pods'.format(namespace_group)
|
|
||||||
|
|
||||||
self.inventory.add_group(name)
|
self.inventory.add_group(name)
|
||||||
self.inventory.add_group(namespace_group)
|
self.inventory.add_group(namespace)
|
||||||
self.inventory.add_child(name, namespace_group)
|
self.inventory.add_child(name, namespace)
|
||||||
self.inventory.add_group(namespace_pods_group)
|
self.inventory.add_group(namespace_pod_group)
|
||||||
self.inventory.add_child(namespace_group, namespace_pods_group)
|
self.inventory.add_child(namespace, namespace_pod_group)
|
||||||
|
|
||||||
for pod in obj.items:
|
for pod in obj.items:
|
||||||
pod_name = pod.metadata.name
|
pod_name = pod.metadata.name
|
||||||
pod_groups = []
|
pod_groups = []
|
||||||
|
@@ -120,17 +136,17 @@ class K8sInventoryHelper(K8sAnsibleMixin):
|
||||||
if pod.metadata.labels:
|
if pod.metadata.labels:
|
||||||
pod_labels = pod.metadata.labels
|
pod_labels = pod.metadata.labels
|
||||||
# create a group for each label_value
|
# create a group for each label_value
|
||||||
for key, value in pod.metadata.labels:
|
for key, value in iteritems(pod.metadata.labels):
|
||||||
group_name = 'label_{0}_{1}'.format(key, value)
|
group_name = '{0}_{1}'.format(key, value)
|
||||||
if group_name not in pod_groups:
|
if group_name not in pod_groups:
|
||||||
pod_groups.append(group_name)
|
pod_groups.append(group_name)
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
for container in pod.status.containerStatuses:
|
for container in pod.status.container_statuses:
|
||||||
# add each pod_container to the namespace group, and to each label_value group
|
# add each pod_container to the namespace group, and to each label_value group
|
||||||
container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
|
container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
|
||||||
self.inventory.add_host(container_name)
|
self.inventory.add_host(container_name)
|
||||||
self.inventory.add_child(namespace_pods_group, container_name)
|
self.inventory.add_child(namespace_pod_group, container_name)
|
||||||
if pod_groups:
|
if pod_groups:
|
||||||
for group in pod_groups:
|
for group in pod_groups:
|
||||||
self.inventory.add_child(group, container_name)
|
self.inventory.add_child(group, container_name)
|
||||||
|
@@ -139,14 +155,14 @@ class K8sInventoryHelper(K8sAnsibleMixin):
|
||||||
self.inventory.set_variable(container_name, 'object_type', 'pod')
|
self.inventory.set_variable(container_name, 'object_type', 'pod')
|
||||||
self.inventory.set_variable(container_name, 'labels', pod_labels)
|
self.inventory.set_variable(container_name, 'labels', pod_labels)
|
||||||
self.inventory.set_variable(container_name, 'annotations', pod_annotations)
|
self.inventory.set_variable(container_name, 'annotations', pod_annotations)
|
||||||
self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.clusterName)
|
self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.cluster_name)
|
||||||
self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.nodeName)
|
self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.node_name)
|
||||||
self.inventory.set_variable(container_name, 'pod_name', pod.spec.name)
|
self.inventory.set_variable(container_name, 'pod_name', pod.spec.node_name)
|
||||||
self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.hostIP)
|
self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.host_ip)
|
||||||
self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
|
self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
|
||||||
self.inventory.set_variable(container_name, 'pod_ip', pod.status.podIP)
|
self.inventory.set_variable(container_name, 'pod_ip', pod.status.pod_ip)
|
||||||
self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.selfLink)
|
self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.self_link)
|
||||||
self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resourceVersion)
|
self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resource_version)
|
||||||
self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
|
self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
|
||||||
self.inventory.set_variable(container_name, 'container_name', container.image)
|
self.inventory.set_variable(container_name, 'container_name', container.image)
|
||||||
self.inventory.set_variable(container_name, 'container_image', container.image)
|
self.inventory.set_variable(container_name, 'container_image', container.image)
|
||||||
|
@@ -163,22 +179,20 @@ class K8sInventoryHelper(K8sAnsibleMixin):
|
||||||
self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
|
self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
|
||||||
container.name)
|
container.name)
|
||||||
|
|
||||||
def get_services_for_namespace(self, client, name, namespace):
|
def get_services_for_namespace(self, name, namespace):
|
||||||
v1_service = client.resources.get(api_version='v1', kind='Service')
|
self.helper.set_model('v1', 'service_list')
|
||||||
try:
|
try:
|
||||||
obj = v1_service.get(namespace=namespace)
|
obj = self.helper.get_object(namespace=namespace)
|
||||||
except DynamicApiError as exc:
|
except KubernetesException as exc:
|
||||||
raise K8sInventoryException('Error fetching Service list: {0}'.format(exc.message))
|
raise K8sInventoryException('Error fetching Service list: {0}'.format(exc.message))
|
||||||
|
|
||||||
namespace_group = 'namespace_{0}'.format(namespace)
|
namespace_service_group = '{0}_services'.format(namespace)
|
||||||
namespace_services_group = '{0}_services'.format(namespace_group)
|
|
||||||
|
|
||||||
self.inventory.add_group(name)
|
self.inventory.add_group(name)
|
||||||
self.inventory.add_group(namespace_group)
|
self.inventory.add_group(namespace)
|
||||||
self.inventory.add_child(name, namespace_group)
|
self.inventory.add_child(name, namespace)
|
||||||
self.inventory.add_group(namespace_services_group)
|
self.inventory.add_group(namespace_service_group)
|
||||||
self.inventory.add_child(namespace_group, namespace_services_group)
|
self.inventory.add_child(namespace, namespace_service_group)
|
||||||
|
|
||||||
for service in obj.items:
|
for service in obj.items:
|
||||||
service_name = service.metadata.name
|
service_name = service.metadata.name
|
||||||
service_labels = {} if not service.metadata.labels else service.metadata.labels
|
service_labels = {} if not service.metadata.labels else service.metadata.labels
|
||||||
|
@@ -188,54 +202,51 @@ class K8sInventoryHelper(K8sAnsibleMixin):
|
||||||
|
|
||||||
if service.metadata.labels:
|
if service.metadata.labels:
|
||||||
# create a group for each label_value
|
# create a group for each label_value
|
||||||
for key, value in service.metadata.labels:
|
for key, value in iteritems(service.metadata.labels):
|
||||||
group_name = 'label_{0}_{1}'.format(key, value)
|
group_name = '{0}_{1}'.format(key, value)
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
self.inventory.add_child(group_name, service_name)
|
self.inventory.add_child(group_name, service_name)
|
||||||
|
|
||||||
try:
|
self.inventory.add_child(namespace_service_group, service_name)
|
||||||
self.inventory.add_child(namespace_services_group, service_name)
|
|
||||||
except AnsibleError as e:
|
|
||||||
raise
|
|
||||||
|
|
||||||
ports = [{'name': port.name,
|
ports = [{'name': port.name,
|
||||||
'port': port.port,
|
'port': port.port,
|
||||||
'protocol': port.protocol,
|
'protocol': port.protocol,
|
||||||
'targetPort': port.targetPort,
|
'targetPort': port.target_port,
|
||||||
'nodePort': port.nodePort} for port in service.spec.ports or []]
|
'nodePort': port.node_port} for port in service.spec.ports]
|
||||||
|
|
||||||
# add hostvars
|
# add hostvars
|
||||||
self.inventory.set_variable(service_name, 'object_type', 'service')
|
self.inventory.set_variable(service_name, 'object_type', 'service')
|
||||||
self.inventory.set_variable(service_name, 'labels', service_labels)
|
self.inventory.set_variable(service_name, 'labels', service_labels)
|
||||||
self.inventory.set_variable(service_name, 'annotations', service_annotations)
|
self.inventory.set_variable(service_name, 'annotations', service_annotations)
|
||||||
self.inventory.set_variable(service_name, 'cluster_name', service.metadata.clusterName)
|
self.inventory.set_variable(service_name, 'cluster_name', service.metadata.cluster_name)
|
||||||
self.inventory.set_variable(service_name, 'ports', ports)
|
self.inventory.set_variable(service_name, 'ports', ports)
|
||||||
self.inventory.set_variable(service_name, 'type', service.spec.type)
|
self.inventory.set_variable(service_name, 'type', service.spec.type)
|
||||||
self.inventory.set_variable(service_name, 'self_link', service.metadata.selfLink)
|
self.inventory.set_variable(service_name, 'self_link', service.metadata.self_link)
|
||||||
self.inventory.set_variable(service_name, 'resource_version', service.metadata.resourceVersion)
|
self.inventory.set_variable(service_name, 'resource_version', service.metadata.resource_version)
|
||||||
self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
|
self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
|
||||||
|
|
||||||
if service.spec.externalTrafficPolicy:
|
if service.spec.external_traffic_policy:
|
||||||
self.inventory.set_variable(service_name, 'external_traffic_policy',
|
self.inventory.set_variable(service_name, 'external_traffic_policy',
|
||||||
service.spec.externalTrafficPolicy)
|
service.spec.external_traffic_policy)
|
||||||
if service.spec.externalIPs:
|
if hasattr(service.spec, 'external_ips') and service.spec.external_ips:
|
||||||
self.inventory.set_variable(service_name, 'external_ips', service.spec.externalIPs)
|
self.inventory.set_variable(service_name, 'external_ips', service.spec.external_ips)
|
||||||
|
|
||||||
if service.spec.externalName:
|
if service.spec.external_name:
|
||||||
self.inventory.set_variable(service_name, 'external_name', service.spec.externalName)
|
self.inventory.set_variable(service_name, 'external_name', service.spec.external_name)
|
||||||
|
|
||||||
if service.spec.healthCheckNodePort:
|
if service.spec.health_check_node_port:
|
||||||
self.inventory.set_variable(service_name, 'health_check_node_port',
|
self.inventory.set_variable(service_name, 'health_check_node_port',
|
||||||
service.spec.healthCheckNodePort)
|
service.spec.health_check_node_port)
|
||||||
if service.spec.loadBalancerIP:
|
if service.spec.load_balancer_ip:
|
||||||
self.inventory.set_variable(service_name, 'load_balancer_ip',
|
self.inventory.set_variable(service_name, 'load_balancer_ip',
|
||||||
service.spec.loadBalancerIP)
|
service.spec.load_balancer_ip)
|
||||||
if service.spec.selector:
|
if service.spec.selector:
|
||||||
self.inventory.set_variable(service_name, 'selector', service.spec.selector)
|
self.inventory.set_variable(service_name, 'selector', service.spec.selector)
|
||||||
|
|
||||||
if hasattr(service.status.loadBalancer, 'ingress') and service.status.loadBalancer.ingress:
|
if hasattr(service.status.load_balancer, 'ingress') and service.status.load_balancer.ingress:
|
||||||
load_balancer = [{'hostname': ingress.hostname,
|
load_balancer = [{'hostname': ingress.hostname,
|
||||||
'ip': ingress.ip} for ingress in service.status.loadBalancer.ingress]
|
'ip': ingress.ip} for ingress in service.status.load_balancer.ingress]
|
||||||
self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
|
self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
|
||||||
|
|
||||||
|
|
||||||
|
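The label handling above is the reason the loop switches to iteritems(): iterating a dict directly yields only its keys, so unpacking "key, value" from a bare metadata.labels dict would fail. A minimal sketch of the group naming that the added code produces (the label values here are illustrative, not from the diff):

# Why the label loop needs iteritems()/items(): iterating a dict directly yields keys only.
labels = {'app': 'galaxy', 'service': 'elastic'}

# Equivalent of the added code path: one inventory group per key/value pair.
group_names = ['{0}_{1}'.format(key, value) for key, value in labels.items()]
print(sorted(group_names))  # ['app_galaxy', 'service_elastic']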
@@ -245,39 +256,46 @@ class OpenShiftInventoryHelper(K8sInventoryHelper):

     def fetch_objects(self, connections):
         super(OpenShiftInventoryHelper, self).fetch_objects(connections)
-        client = self.get_api_client()
+        self.helper = self.get_helper('v1', 'namespace_list')

         if connections:
             for connection in connections:
-                client = self.get_api_client(**connection)
-                name = connection.get('name', self.get_default_host_name(client.configuration.host))
+                self.authenticate(connection)
+                name = connection.get('name', self.get_default_host_name(self.helper.api_client.host))
                 if connection.get('namespaces'):
                     namespaces = connection['namespaces']
                 else:
-                    namespaces = self.get_available_namespaces(client)
+                    namespaces = self.get_available_namespaces()
                 for namespace in namespaces:
-                    self.get_routes_for_namespace(client, name, namespace)
+                    self.get_routes_for_namespace(name, namespace)
         else:
-            name = self.get_default_host_name(client.configuration.host)
-            namespaces = self.get_available_namespaces(client)
+            name = self.get_default_host_name(self.helper.api_client.host)
+            namespaces = self.get_available_namespaces()
             for namespace in namespaces:
-                self.get_routes_for_namespace(client, name, namespace)
+                self.get_routes_for_namespace(name, namespace)

-    def get_routes_for_namespace(self, client, name, namespace):
-        v1_route = client.resources.get(api_version='v1', kind='Route')
-        try:
-            obj = v1_route.get(namespace=namespace)
-        except DynamicApiError as exc:
+    def get_helper(self, api_version, kind):
+        try:
+            helper = OpenShiftObjectHelper(api_version=api_version, kind=kind, debug=False)
+            helper.get_model(api_version, kind)
+            return helper
+        except KubernetesException as exc:
+            raise K8sInventoryException('Error initializing object helper: {0}'.format(exc.message))
+
+    def get_routes_for_namespace(self, name, namespace):
+        self.helper.set_model('v1', 'route_list')
+        try:
+            obj = self.helper.get_object(namespace=namespace)
+        except KubernetesException as exc:
             raise K8sInventoryException('Error fetching Routes list: {0}'.format(exc.message))

-        namespace_group = 'namespace_{0}'.format(namespace)
-        namespace_routes_group = '{0}_routes'.format(namespace_group)
+        namespace_routes_group = '{0}_routes'.format(namespace)

         self.inventory.add_group(name)
-        self.inventory.add_group(namespace_group)
-        self.inventory.add_child(name, namespace_group)
+        self.inventory.add_group(namespace)
+        self.inventory.add_child(name, namespace)
         self.inventory.add_group(namespace_routes_group)
-        self.inventory.add_child(namespace_group, namespace_routes_group)
+        self.inventory.add_child(namespace, namespace_routes_group)
         for route in obj.items:
             route_name = route.metadata.name
             route_labels = {} if not route.metadata.labels else route.metadata.labels

@@ -287,8 +305,8 @@ class OpenShiftInventoryHelper(K8sInventoryHelper):

             if route.metadata.labels:
                 # create a group for each label_value
-                for key, value in route.metadata.labels:
-                    group_name = 'label_{0}_{1}'.format(key, value)
+                for key, value in iteritems(route.metadata.labels):
+                    group_name = '{0}_{1}'.format(key, value)
                     self.inventory.add_group(group_name)
                     self.inventory.add_child(group_name, route_name)

@@ -297,10 +315,10 @@ class OpenShiftInventoryHelper(K8sInventoryHelper):

             # add hostvars
             self.inventory.set_variable(route_name, 'labels', route_labels)
             self.inventory.set_variable(route_name, 'annotations', route_annotations)
-            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
+            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.cluster_name)
             self.inventory.set_variable(route_name, 'object_type', 'route')
-            self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
-            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
+            self.inventory.set_variable(route_name, 'self_link', route.metadata.self_link)
+            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resource_version)
             self.inventory.set_variable(route_name, 'uid', route.metadata.uid)

             if route.spec.host:

@@ -309,5 +327,5 @@ class OpenShiftInventoryHelper(K8sInventoryHelper):

             if route.spec.path:
                 self.inventory.set_variable(route_name, 'path', route.spec.path)

-            if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
+            if hasattr(route.spec.port, 'target_port') and route.spec.port.target_port:
                 self.inventory.set_variable(route_name, 'port', route.spec.port)
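Both the Service and Route branches build the same shape of group tree: cluster name at the top, one group per namespace, and a per-type child group that holds the objects as inventory hosts. A rough sketch of that hierarchy, using a stand-in inventory object and made-up names (the real plugin uses Ansible's inventory API, which is not reproduced here):

# Stand-in inventory for illustration only.
class FakeInventory(object):
    def __init__(self):
        self.groups = {}  # group name -> set of children (groups or hosts)

    def add_group(self, name):
        self.groups.setdefault(name, set())

    def add_child(self, parent, child):
        self.groups.setdefault(parent, set()).add(child)


inventory = FakeInventory()
cluster, namespace = 'openshift-cluster', 'testing'   # hypothetical names
inventory.add_group(cluster)
inventory.add_group(namespace)
inventory.add_child(cluster, namespace)
routes_group = '{0}_routes'.format(namespace)
inventory.add_group(routes_group)
inventory.add_child(namespace, routes_group)
inventory.add_child(routes_group, 'my-route')          # each Route becomes a "host"
print(inventory.groups)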
lib/ansible/module_utils/k8s/lookup.py (new file, 208 lines)

@@ -0,0 +1,208 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import, division, print_function

import json
import os

from ansible.module_utils.k8s.common import OpenShiftAnsibleModuleMixin, DateTimeEncoder, remove_secret_data, to_snake
from ansible.module_utils.k8s.helper import AUTH_ARG_SPEC

try:
    from openshift.helper.kubernetes import KubernetesObjectHelper
    from openshift.helper.exceptions import KubernetesException
    HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
    HAS_K8S_MODULE_HELPER = False

try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False


class KubernetesLookup(object):

    def __init__(self):

        if not HAS_K8S_MODULE_HELPER:
            raise Exception(
                "Requires the OpenShift Python client. Try `pip install openshift`"
            )

        if not HAS_YAML:
            raise Exception(
                "Requires PyYAML. Try `pip install PyYAML`"
            )

        self.kind = None
        self.name = None
        self.namespace = None
        self.api_version = None
        self.label_selector = None
        self.field_selector = None
        self.include_uninitialized = None
        self.resource_definition = None
        self.helper = None
        self.connection = {}

    def run(self, terms, variables=None, **kwargs):
        self.kind = kwargs.get('kind')
        self.name = kwargs.get('resource_name')
        self.namespace = kwargs.get('namespace')
        self.api_version = kwargs.get('api_version', 'v1')
        self.label_selector = kwargs.get('label_selector')
        self.field_selector = kwargs.get('field_selector')
        self.include_uninitialized = kwargs.get('include_uninitialized', False)

        resource_definition = kwargs.get('resource_definition')
        src = kwargs.get('src')
        if src:
            resource_definition = self.load_resource_definition(src)
        if resource_definition:
            self.params_from_resource_definition(resource_definition)

        if not self.kind:
            raise Exception(
                "Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
                "using the 'resource_definition' parameter."
            )

        self.kind = to_snake(self.kind)
        self.helper = self.get_helper(self.api_version, self.kind)

        auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
                     'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')

        for arg in AUTH_ARG_SPEC:
            if arg in auth_args and kwargs.get(arg) is not None:
                self.connection[arg] = kwargs.get(arg)

        try:
            self.helper.set_client_config(**self.connection)
        except Exception as exc:
            raise Exception(
                "Client authentication failed: {0}".format(exc.message)
            )

        if self.name:
            return self.get_object()

        return self.list_objects()

    def get_helper(self, api_version, kind):
        try:
            helper = KubernetesObjectHelper(api_version=api_version, kind=kind, debug=False)
            helper.get_model(api_version, kind)
            return helper
        except KubernetesException as exc:
            raise Exception("Error initializing helper: {0}".format(exc.message))

    def load_resource_definition(self, src):
        """ Load the requested src path """
        path = os.path.normpath(src)
        if not os.path.exists(path):
            raise Exception("Error accessing {0}. Does the file exist?".format(path))
        try:
            result = yaml.safe_load(open(path, 'r'))
        except (IOError, yaml.YAMLError) as exc:
            raise Exception("Error loading resource_definition: {0}".format(exc))
        return result

    def params_from_resource_definition(self, defn):
        if defn.get('apiVersion'):
            self.api_version = defn['apiVersion']
        if defn.get('kind'):
            self.kind = defn['kind']
        if defn.get('metadata', {}).get('name'):
            self.name = defn['metadata']['name']
        if defn.get('metadata', {}).get('namespace'):
            self.namespace = defn['metadata']['namespace']

    def get_object(self):
        """ Fetch a named object """
        try:
            result = self.helper.get_object(self.name, self.namespace)
        except KubernetesException as exc:
            raise Exception('Failed to retrieve requested object: {0}'.format(exc.message))
        response = []
        if result is not None:
            # Convert Datetime objects to ISO format
            result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
            if self.kind == 'secret':
                remove_secret_data(result_json)
            response.append(result_json)

        return response

    def list_objects(self):
        """ Query for a set of objects """
        if self.namespace:
            method_name = 'list_namespaced_{0}'.format(self.kind)
            try:
                method = self.helper.lookup_method(method_name=method_name)
            except KubernetesException:
                raise Exception(
                    "Failed to find method {0} for API {1}".format(method_name, self.api_version)
                )
        else:
            method_name = 'list_{0}_for_all_namespaces'.format(self.kind)
            try:
                method = self.helper.lookup_method(method_name=method_name)
            except KubernetesException:
                method_name = 'list_{0}'.format(self.kind)
                try:
                    method = self.helper.lookup_method(method_name=method_name)
                except KubernetesException:
                    raise Exception(
                        "Failed to find method for API {0} and Kind {1}".format(self.api_version, self.kind)
                    )

        params = {}
        if self.field_selector:
            params['field_selector'] = self.field_selector
        if self.label_selector:
            params['label_selector'] = self.label_selector
        params['include_uninitialized'] = self.include_uninitialized

        if self.namespace:
            try:
                result = method(self.namespace, **params)
            except KubernetesException as exc:
                raise Exception(exc.message)
        else:
            try:
                result = method(**params)
            except KubernetesException as exc:
                raise Exception(exc.message)

        response = []
        if result is not None:
            # Convert Datetime objects to ISO format
            result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
            response = result_json.get('items', [])
            if self.kind == 'secret':
                for item in response:
                    remove_secret_data(item)
        return response


class OpenShiftLookup(OpenShiftAnsibleModuleMixin, KubernetesLookup):
    pass
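The interesting part of list_objects() above is how it picks a generated client method from the snake_case kind: a namespaced listing uses list_namespaced_{kind}, an all-namespaces listing tries list_{kind}_for_all_namespaces and falls back to list_{kind} for cluster-scoped kinds. A minimal sketch of that resolution, assuming a hypothetical client object that exposes such generated methods (the function name and client here are illustrative, not part of the module):

def resolve_list_method(client, kind, namespace=None):
    # Build candidate method names the same way list_objects() does.
    if namespace:
        candidates = ['list_namespaced_{0}'.format(kind)]
    else:
        candidates = ['list_{0}_for_all_namespaces'.format(kind),
                      'list_{0}'.format(kind)]  # fallback for cluster-scoped kinds
    for name in candidates:
        method = getattr(client, name, None)
        if method is not None:
            return method
    raise RuntimeError('No list method found for kind {0}'.format(kind))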
@@ -18,12 +18,13 @@

 from __future__ import absolute_import, division, print_function

+import copy
+
-from ansible.module_utils.k8s.common import KubernetesAnsibleModule
+from ansible.module_utils.k8s.helper import COMMON_ARG_SPEC, AUTH_ARG_SPEC, OPENSHIFT_ARG_SPEC
+from ansible.module_utils.k8s.common import KubernetesAnsibleModule, OpenShiftAnsibleModuleMixin, to_snake

 try:
-    from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError
+    from openshift.helper.exceptions import KubernetesException
 except ImportError:
     # Exception handled in common
     pass

@@ -32,8 +33,6 @@ except ImportError:

 class KubernetesRawModule(KubernetesAnsibleModule):

     def __init__(self, *args, **kwargs):
-        self.client = None
-
         mutually_exclusive = [
             ('resource_definition', 'src'),
         ]
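The guarded import above follows the usual Ansible pattern: attempt the optional dependency at import time, remember whether it succeeded, and defer the failure until the module can report it through fail_json(). A short sketch of that pattern, with illustrative flag and function names that are not part of this module:

try:
    from openshift.helper.exceptions import KubernetesException
    HAS_OPENSHIFT = True
except ImportError:
    KubernetesException = Exception  # placeholder so except clauses still parse and run
    HAS_OPENSHIFT = False


def ensure_dependencies(module):
    # Called once a module object exists, where a clean JSON failure can be returned.
    if not HAS_OPENSHIFT:
        module.fail_json(msg="This module requires the OpenShift Python client. Try `pip install openshift`")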
@@ -43,140 +42,170 @@ class KubernetesRawModule(KubernetesAnsibleModule):

                                supports_check_mode=True,
                                **kwargs)

-        kind = self.params.pop('kind')
-        api_version = self.params.pop('api_version')
-        name = self.params.pop('name')
-        namespace = self.params.pop('namespace')
-        resource_definition = self.params.pop('resource_definition')
-        if resource_definition:
-            self.resource_definitions = [resource_definition]
-        src = self.params.pop('src')
-        if src:
-            self.resource_definitions = self.load_resource_definitions(src)
-
-        if not resource_definition and not src:
-            self.resource_definitions = [{
-                'kind': kind,
-                'apiVersion': api_version,
-                'metadata': {
-                    'name': name,
-                    'namespace': namespace
-                }
-            }]
+        self.kind = self.params.pop('kind')
+        self.api_version = self.params.pop('api_version')
+        self.resource_definition = self.params.pop('resource_definition')
+        self.src = self.params.pop('src')
+        if self.src:
+            self.resource_definition = self.load_resource_definition(self.src)
+
+        if self.resource_definition:
+            self.api_version = self.resource_definition.get('apiVersion')
+            self.kind = self.resource_definition.get('kind')
+
+        self.api_version = self.api_version.lower()
+        self.kind = to_snake(self.kind)
+
+        if not self.api_version:
+            self.fail_json(
+                msg=("Error: no api_version specified. Use the api_version parameter, or provide it as part of a ",
+                     "resource_definition.")
+            )
+        if not self.kind:
+            self.fail_json(
+                msg="Error: no kind specified. Use the kind parameter, or provide it as part of a resource_definition"
+            )
+
+        self.helper = self.get_helper(self.api_version, self.kind)
+
+    @property
+    def argspec(self):
+        argspec = copy.deepcopy(COMMON_ARG_SPEC)
+        argspec.update(copy.deepcopy(AUTH_ARG_SPEC))
+        return argspec

     def execute_module(self):
-        changed = False
-        results = []
-        self.client = self.get_api_client()
-        for definition in self.resource_definitions:
-            kind = definition.get('kind')
-            search_kind = kind
-            if kind.lower().endswith('list'):
-                search_kind = kind[:-4]
-            api_version = definition.get('apiVersion')
-            try:
-                resource = self.client.resources.get(kind=search_kind, api_version=api_version)
-            except Exception as e:
-                self.fail_json(msg='Failed to find resource {0}.{1}: {2}'.format(
-                    api_version, search_kind, e
-                ))
-            result = self.perform_action(resource, definition)
-            changed = changed or result['changed']
-            results.append(result)
-
-        if len(results) == 1:
-            self.exit_json(**results[0])
-
-        self.exit_json(**{
-            'changed': changed,
-            'result': {
-                'results': results
-            }
-        })
-
-    def perform_action(self, resource, definition):
-        result = {'changed': False, 'result': {}}
+        if self.resource_definition:
+            resource_params = self.resource_to_parameters(self.resource_definition)
+            self.params.update(resource_params)
+
+        self.authenticate()
+
         state = self.params.pop('state', None)
         force = self.params.pop('force', False)
-        name = definition.get('metadata', {}).get('name')
-        namespace = definition.get('metadata', {}).get('namespace')
+        name = self.params.get('name')
+        namespace = self.params.get('namespace')
         existing = None

         self.remove_aliases()

-        if definition['kind'].endswith('list'):
-            result['result'] = resource.get(namespace=namespace).to_dict()
-            result['changed'] = False
-            result['method'] = 'get'
-            return result
+        return_attributes = dict(changed=False, result=dict())
+
+        if self.helper.base_model_name_snake.endswith('list'):
+            k8s_obj = self._read(name, namespace)
+            return_attributes['result'] = k8s_obj.to_dict()
+            self.exit_json(**return_attributes)

         try:
-            existing = resource.get(name=name, namespace=namespace)
-        except NotFoundError:
-            pass
-        except DynamicApiError as exc:
-            self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
-                           error=exc.status, status=exc.status, reason=exc.reason)
+            existing = self.helper.get_object(name, namespace)
+        except KubernetesException as exc:
+            self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.message),
+                           error=exc.value.get('status'))

         if state == 'absent':
-            result['method'] = "delete"
             if not existing:
                 # The object already does not exist
-                return result
+                self.exit_json(**return_attributes)
             else:
                 # Delete the object
                 if not self.check_mode:
                     try:
-                        k8s_obj = resource.delete(name, namespace=namespace)
-                        result['result'] = k8s_obj.to_dict()
-                    except DynamicApiError as exc:
-                        self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
-                                       error=exc.status, status=exc.status, reason=exc.reason)
-            result['changed'] = True
-            return result
+                        self.helper.delete_object(name, namespace)
+                    except KubernetesException as exc:
+                        self.fail_json(msg="Failed to delete object: {0}".format(exc.message),
+                                       error=exc.value.get('status'))
+            return_attributes['changed'] = True
+            self.exit_json(**return_attributes)
         else:
             if not existing:
-                if not self.check_mode:
-                    try:
-                        k8s_obj = resource.create(definition, namespace=namespace)
-                    except ConflictError:
-                        # Some resources, like ProjectRequests, can't be created multiple times,
-                        # because the resources that they create don't match their kind
-                        # In this case we'll mark it as unchanged and warn the user
-                        self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
-                                  if the resource you are creating does not directly create a resource of the same kind.".format(name))
-                        return result
-                result['result'] = k8s_obj.to_dict()
-                result['changed'] = True
-                result['method'] = 'create'
-                return result
+                k8s_obj = self._create(namespace)
+                return_attributes['result'] = k8s_obj.to_dict()
+                return_attributes['changed'] = True
+                self.exit_json(**return_attributes)

             if existing and force:
+                k8s_obj = None
+                request_body = self.helper.request_body_from_params(self.params)
                 if not self.check_mode:
                     try:
-                        k8s_obj = resource.replace(definition, name=name, namespace=namespace)
-                        result['result'] = k8s_obj.to_dict()
-                    except DynamicApiError as exc:
-                        self.fail_json(msg="Failed to replace object: {0}".format(exc.body),
-                                       error=exc.status, status=exc.status, reason=exc.reason)
-                result['changed'] = True
-                result['method'] = 'replace'
-                return result
-
-            match, diffs = self.diff_objects(existing.to_dict(), definition)
-
+                        k8s_obj = self.helper.replace_object(name, namespace, body=request_body)
+                    except KubernetesException as exc:
+                        self.fail_json(msg="Failed to replace object: {0}".format(exc.message),
+                                       error=exc.value.get('status'))
+                return_attributes['result'] = k8s_obj.to_dict()
+                return_attributes['changed'] = True
+                self.exit_json(**return_attributes)
+
+            # Check if existing object should be patched
+            k8s_obj = copy.deepcopy(existing)
+            try:
+                self.helper.object_from_params(self.params, obj=k8s_obj)
+            except KubernetesException as exc:
+                self.fail_json(msg="Failed to patch object: {0}".format(exc.message))
+            match, diff = self.helper.objects_match(self.helper.fix_serialization(existing), k8s_obj)
             if match:
-                result['result'] = existing.to_dict()
-                return result
+                return_attributes['result'] = existing.to_dict()
+                self.exit_json(**return_attributes)
             # Differences exist between the existing obj and requested params
             if not self.check_mode:
                 try:
-                    k8s_obj = resource.patch(definition, name=name, namespace=namespace)
-                    result['result'] = k8s_obj.to_dict()
-                except DynamicApiError as exc:
-                    self.fail_json(msg="Failed to patch object: {0}".format(exc.body),
-                                   error=exc.status, status=exc.status, reason=exc.reason)
-            result['changed'] = True
-            result['method'] = 'patch'
-            result['diff'] = diffs
-            return result
+                    k8s_obj = self.helper.patch_object(name, namespace, k8s_obj)
+                except KubernetesException as exc:
+                    self.fail_json(msg="Failed to patch object: {0}".format(exc.message))
+            return_attributes['result'] = k8s_obj.to_dict()
+            return_attributes['changed'] = True
+            self.exit_json(**return_attributes)
+
+    def _create(self, namespace):
+        request_body = None
+        k8s_obj = None
+        try:
+            request_body = self.helper.request_body_from_params(self.params)
+        except KubernetesException as exc:
+            self.fail_json(msg="Failed to create object: {0}".format(exc.message))
+        if not self.check_mode:
+            try:
+                k8s_obj = self.helper.create_object(namespace, body=request_body)
+            except KubernetesException as exc:
+                self.fail_json(msg="Failed to create object: {0}".format(exc.message),
+                               error=exc.value.get('status'))
+        return k8s_obj
+
+    def _read(self, name, namespace):
+        k8s_obj = None
+        try:
+            k8s_obj = self.helper.get_object(name, namespace)
+        except KubernetesException as exc:
+            self.fail_json(msg='Failed to retrieve requested object',
+                           error=exc.value.get('status'))
+        return k8s_obj
+
+
+class OpenShiftRawModule(OpenShiftAnsibleModuleMixin, KubernetesRawModule):
+
+    @property
+    def argspec(self):
+        args = super(OpenShiftRawModule, self).argspec
+        args.update(copy.deepcopy(OPENSHIFT_ARG_SPEC))
+        return args
+
+    def _create(self, namespace):
+        if self.kind.lower() == 'project':
+            return self._create_project()
+        return KubernetesRawModule._create(self, namespace)
+
+    def _create_project(self):
+        new_obj = None
+        k8s_obj = None
+        try:
+            new_obj = self.helper.object_from_params(self.params)
+        except KubernetesException as exc:
+            self.fail_json(msg="Failed to create object: {0}".format(exc.message))
+        try:
+            k8s_obj = self.helper.create_project(metadata=new_obj.metadata,
+                                                 display_name=self.params.get('display_name'),
+                                                 description=self.params.get('description'))
+        except KubernetesException as exc:
+            self.fail_json(msg='Failed to retrieve requested object',
+                           error=exc.value.get('status'))
+        return k8s_obj
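The execute_module() body above reduces to a fairly small state machine: fetch the existing object, then delete, create, replace, or patch depending on state, force, and whether the live object already matches the requested parameters. A condensed sketch of that flow, written against a hypothetical helper with the same method names as above; error handling and check mode are deliberately omitted:

def reconcile(helper, state, name, namespace, params, force=False):
    existing = helper.get_object(name, namespace)

    if state == 'absent':
        if existing:
            helper.delete_object(name, namespace)
            return {'changed': True}
        return {'changed': False}

    if not existing:
        return {'changed': True, 'result': helper.create_object(namespace, body=params)}

    if force:
        return {'changed': True, 'result': helper.replace_object(name, namespace, body=params)}

    desired = helper.object_from_params(params, obj=existing)
    match, _ = helper.objects_match(existing, desired)
    if match:
        return {'changed': False, 'result': existing}
    return {'changed': True, 'result': helper.patch_object(name, namespace, desired)}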
@@ -22,12 +22,13 @@ import copy

 import math
 import time

+from ansible.module_utils.six import iteritems
+from ansible.module_utils.k8s.common import OpenShiftAnsibleModuleMixin
 from ansible.module_utils.k8s.raw import KubernetesRawModule
-from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
+from ansible.module_utils.k8s.helper import AUTH_ARG_SPEC, COMMON_ARG_SPEC

 try:
     from openshift import watch
-    from openshift.dynamic.client import ResourceInstance
     from openshift.helper.exceptions import KubernetesException
 except ImportError as exc:
     class KubernetesException(Exception):
@@ -46,12 +47,14 @@ SCALE_ARG_SPEC = {

 class KubernetesAnsibleScaleModule(KubernetesRawModule):

     def execute_module(self):
-        definition = self.resource_definitions[0]
-
-        self.client = self.get_api_client()
-
-        name = definition['metadata']['name']
-        namespace = definition['metadata'].get('namespace')
+        if self.resource_definition:
+            resource_params = self.resource_to_parameters(self.resource_definition)
+            self.params.update(resource_params)
+
+        self.authenticate()
+
+        name = self.params.get('name')
+        namespace = self.params.get('namespace')
         current_replicas = self.params.get('current_replicas')
         replicas = self.params.get('replicas')
         resource_version = self.params.get('resource_version')

@@ -62,10 +65,8 @@ class KubernetesAnsibleScaleModule(KubernetesRawModule):

         existing_count = None
         return_attributes = dict(changed=False, result=dict())

-        resource = self.client.resources.get(api_version=definition['apiVersion'], kind=definition['kind'])
-
         try:
-            existing = resource.get(name=name, namespace=namespace)
+            existing = self.helper.get_object(name, namespace)
             return_attributes['result'] = existing.to_dict()
         except KubernetesException as exc:
             self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.message),

@@ -79,7 +80,7 @@ class KubernetesAnsibleScaleModule(KubernetesRawModule):

         if existing_count is None:
             self.fail_json(msg='Failed to retrieve the available count for the requested object.')

-        if resource_version and resource_version != existing.metadata.resourceVersion:
+        if resource_version and resource_version != existing.metadata.resource_version:
             self.exit_json(**return_attributes)

         if current_replicas is not None and existing_count != current_replicas:

@@ -90,13 +91,25 @@ class KubernetesAnsibleScaleModule(KubernetesRawModule):

         if not self.check_mode:
             if self.kind == 'job':
                 existing.spec.parallelism = replicas
-                k8s_obj = resource.patch(existing.to_dict())
+                k8s_obj = self.helper.patch_object(name, namespace, existing)
             else:
-                k8s_obj = self.scale(resource, existing, replicas, wait, wait_time)
+                k8s_obj = self.scale(existing, replicas, wait, wait_time)
             return_attributes['result'] = k8s_obj.to_dict()

         self.exit_json(**return_attributes)

+    def resource_to_parameters(self, resource):
+        """ Converts a resource definition to module parameters """
+        parameters = {}
+        for key, value in iteritems(resource):
+            if key in ('apiVersion', 'kind', 'status'):
+                continue
+            elif key == 'metadata' and isinstance(value, dict):
+                for meta_key, meta_value in iteritems(value):
+                    if meta_key in ('name', 'namespace', 'resourceVersion'):
+                        parameters[meta_key] = meta_value
+        return parameters
+
     @property
     def argspec(self):
         args = copy.deepcopy(COMMON_ARG_SPEC)
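A quick worked example of the added resource_to_parameters() helper, using an illustrative definition: apiVersion, kind, and status are skipped, and only the identifying metadata fields (name, namespace, resourceVersion) are promoted to module parameters.

definition = {
    'apiVersion': 'v1',
    'kind': 'DeploymentConfig',
    'metadata': {'name': 'elastic', 'namespace': 'testing', 'labels': {'app': 'galaxy'}},
    'status': {'replicas': 1},
}
# module.resource_to_parameters(definition) would return:
# {'name': 'elastic', 'namespace': 'testing'}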
@@ -106,67 +119,91 @@ class KubernetesAnsibleScaleModule(KubernetesRawModule):

         args.update(SCALE_ARG_SPEC)
         return args

-    def scale(self, resource, existing_object, replicas, wait, wait_time):
+    def scale(self, existing_object, replicas, wait, wait_time):
         name = existing_object.metadata.name
         namespace = existing_object.metadata.namespace
+        method_name = 'patch_namespaced_{0}_scale'.format(self.kind)
+        method = None
+        model = None

-        if not hasattr(resource, 'scale'):
+        try:
+            method = self.helper.lookup_method(method_name=method_name)
+        except KubernetesException:
             self.fail_json(
-                msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
+                msg="Failed to get method {0}. Is 'scale' a valid operation for {1}?".format(method_name, self.kind)
             )

-        scale_obj = {'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}
+        try:
+            model = self.helper.get_model(self.api_version, 'scale')
+        except KubernetesException:
+            self.fail_json(
+                msg="Failed to fetch the 'Scale' model for API version {0}. Are you using the correct "
+                    "API?".format(self.api_version)
+            )
+
+        scale_obj = model()
+        scale_obj.kind = 'scale'
+        scale_obj.api_version = self.api_version.lower()
+        scale_obj.metadata = self.helper.get_model(
+            self.api_version,
+            self.helper.get_base_model_name(scale_obj.swagger_types['metadata'])
+        )()
+        scale_obj.metadata.name = name
+        scale_obj.metadata.namespace = namespace
+        scale_obj.spec = self.helper.get_model(
+            self.api_version,
+            self.helper.get_base_model_name(scale_obj.swagger_types['spec'])
+        )()
+        scale_obj.spec.replicas = replicas

         return_obj = None
         stream = None

         if wait:
-            w, stream = self._create_stream(resource, namespace, wait_time)
+            w, stream = self._create_stream(namespace, wait_time)

         try:
-            resource.scale.patch(body=scale_obj)
+            method(name, namespace, scale_obj)
         except Exception as exc:
             self.fail_json(
                 msg="Scale request failed: {0}".format(exc.message)
             )

         if wait and stream is not None:
-            return_obj = self._read_stream(resource, w, stream, name, replicas)
+            return_obj = self._read_stream(w, stream, name, replicas)

         if not return_obj:
             return_obj = self._wait_for_response(name, namespace)

         return return_obj

-    def _create_stream(self, resource, namespace, wait_time):
+    def _create_stream(self, namespace, wait_time):
         """ Create a stream of events for the object """
         w = None
         stream = None
         try:
+            list_method = self.helper.lookup_method('list', namespace)
             w = watch.Watch()
-            w._api_client = self.client.client
+            w._api_client = self.helper.api_client
             if namespace:
-                stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
+                stream = w.stream(list_method, namespace, timeout_seconds=wait_time)
             else:
-                stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
+                stream = w.stream(list_method, timeout_seconds=wait_time)
         except KubernetesException:
             pass
+        except Exception:
+            raise
         return w, stream

-    def _read_stream(self, resource, watcher, stream, name, replicas):
+    def _read_stream(self, watcher, stream, name, replicas):
         """ Wait for ready_replicas to equal the requested number of replicas. """
         return_obj = None
         try:
             for event in stream:
                 if event.get('object'):
-                    obj = ResourceInstance(resource, event['object'])
+                    obj = event['object']
                     if obj.metadata.name == name and hasattr(obj, 'status'):
-                        if replicas == 0:
-                            if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
-                                return_obj = obj
-                                watcher.stop()
-                                break
-                        if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
+                        if hasattr(obj.status, 'ready_replicas') and obj.status.ready_replicas == replicas:
                             return_obj = obj
                             watcher.stop()
                             break

@@ -175,23 +212,27 @@ class KubernetesAnsibleScaleModule(KubernetesRawModule):

         if not return_obj:
             self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
-        if replicas and return_obj.status.readyReplicas is None:
+        if return_obj.status.ready_replicas is None:
             self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
-        if replicas and return_obj.status.readyReplicas != replicas:
+        if return_obj.status.ready_replicas != replicas:
             self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
                            "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
         return return_obj

-    def _wait_for_response(self, resource, name, namespace):
+    def _wait_for_response(self, name, namespace):
         """ Wait for an API response """
         tries = 0
         half = math.ceil(20 / 2)
         obj = None

         while tries <= half:
-            obj = resource.get(name=name, namespace=namespace)
+            obj = self.helper.get_object(name, namespace)
             if obj:
                 break
             tries += 2
             time.sleep(2)
         return obj
+
+
+class OpenShiftAnsibleScaleModule(OpenShiftAnsibleModuleMixin, KubernetesAnsibleScaleModule):
+    pass
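The wait behaviour in _read_stream() above boils down to consuming watch events until the named object reports ready_replicas equal to the requested count. A simplified sketch of that loop, where the events iterable and the attribute-style objects are stand-ins for the openshift watch stream rather than the real API types:

def wait_for_replicas(events, name, replicas):
    # Scan watch events; return the object once it reaches the requested ready count.
    for event in events:
        obj = event.get('object')
        if not obj:
            continue
        status = getattr(obj, 'status', None)
        if getattr(obj.metadata, 'name', None) == name and status is not None:
            if getattr(status, 'ready_replicas', None) == replicas:
                return obj
    return None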
@@ -1 +0,0 @@
-k8s.py
@@ -15,15 +15,13 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',

 DOCUMENTATION = '''

-module: k8s
+module: k8s_raw

 short_description: Manage Kubernetes (K8s) objects

-version_added: "2.6"
+version_added: "2.5"

-author:
-  - "Chris Houseknecht (@chouseknecht)"
-  - "Fabian von Feilitzsch (@fabianvf)"
+author: "Chris Houseknecht (@chouseknecht)"

 description:
   - Use the OpenShift Python client to perform CRUD operations on K8s objects.

@@ -41,7 +39,7 @@ extends_documentation_fragment:

 requirements:
   - "python >= 2.7"
-  - "openshift >= 0.6"
+  - "openshift == 0.4.3"
   - "PyYAML >= 3.11"
 '''
@@ -21,9 +21,7 @@ short_description: Set a new size for a Deployment, ReplicaSet, Replication Cont

 version_added: "2.5"

-author:
-  - "Chris Houseknecht (@chouseknecht)"
-  - "Fabian von Feilitzsch (@fabianvf)"
+author: "Chris Houseknecht (@chouseknecht)"

 description:
   - Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicatSet,

@@ -37,7 +35,7 @@ extends_documentation_fragment:

 requirements:
   - "python >= 2.7"
-  - "openshift >= 0.6"
+  - "openshift == 0.4.3"
   - "PyYAML >= 3.11"
 '''
@@ -1 +0,0 @@
-../k8s/k8s.py

@@ -1 +0,0 @@
-../k8s/k8s_scale.py
lib/ansible/modules/clustering/openshift/openshift_raw.py (new file, 204 lines)

@@ -0,0 +1,204 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2018, Chris Houseknecht <@chouseknecht>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||||
|
'status': ['preview'],
|
||||||
|
'supported_by': 'community'}
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
|
||||||
|
module: openshift_raw
|
||||||
|
|
||||||
|
short_description: Manage OpenShift objects
|
||||||
|
|
||||||
|
version_added: "2.5"
|
||||||
|
|
||||||
|
author: "Chris Houseknecht (@chouseknecht)"
|
||||||
|
|
||||||
|
description:
|
||||||
|
- Use the OpenShift Python client to perform CRUD operations on OpenShift objects.
|
||||||
|
- Pass the object definition from a source file or inline. See examples for reading
|
||||||
|
files and using Jinja templates.
|
||||||
|
- Access to the full range of K8s and OpenShift APIs.
|
||||||
|
- Authenticate using either a config file, certificates, password or token.
|
||||||
|
- Supports check mode.
|
||||||
|
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- k8s_state_options
|
||||||
|
- k8s_name_options
|
||||||
|
- k8s_resource_options
|
||||||
|
- k8s_auth_options
|
||||||
|
|
||||||
|
options:
|
||||||
|
description:
|
||||||
|
description:
|
||||||
|
- Use only when creating a project, otherwise ignored. Adds a description to the project
|
||||||
|
metadata.
|
||||||
|
display_name:
|
||||||
|
description:
|
||||||
|
- Use only when creating a project, otherwise ignored. Adds a display name to the project
|
||||||
|
metadata.
|
||||||
|
|
||||||
|
requirements:
|
||||||
|
- "python >= 2.7"
|
||||||
|
- "openshift == 0.4.3"
|
||||||
|
- "PyYAML >= 3.11"
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: Create a project
|
||||||
|
openshift_raw:
|
||||||
|
api_version: v1
|
||||||
|
kind: Project
|
||||||
|
name: testing
|
||||||
|
description: Testing
|
||||||
|
display_name: "This is a test project."
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Create a Persistent Volume Claim from an inline definition
|
||||||
|
openshift_raw:
|
||||||
|
state: present
|
||||||
|
definition:
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolumeClaim
|
||||||
|
metadata:
|
||||||
|
name: elastic-volume
|
||||||
|
namespace: testing
|
||||||
|
spec:
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 5Gi
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
|
||||||
|
- name: Create a Deployment from an inline definition
|
||||||
|
openshift_raw:
|
||||||
|
state: present
|
||||||
|
definition:
|
||||||
|
apiVersion: v1
|
||||||
|
kind: DeploymentConfig
|
||||||
|
metadata:
|
||||||
|
name: elastic
|
||||||
|
labels:
|
||||||
|
app: galaxy
|
||||||
|
service: elastic
|
||||||
|
namespace: testing
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: galaxy
|
||||||
|
service: elastic
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: elastic
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /usr/share/elasticsearch/data
|
||||||
|
name: elastic-volume
|
||||||
|
command: ["elasticsearch"]
|
||||||
|
image: "ansible/galaxy-elasticsearch:2.4.6"
|
||||||
|
volumes:
|
||||||
|
- name: elastic-volume
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: elastic-volume
|
||||||
|
replicas: 1
|
||||||
|
strategy:
|
||||||
|
type: Rolling
|
||||||
|
|
||||||
|
- name: Remove an existing Deployment
|
||||||
|
openshift_raw:
|
||||||
|
api_version: v1
|
||||||
|
kind: DeploymentConfig
|
||||||
|
name: elastic
|
||||||
|
namespace: testing
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: Create a Secret
|
||||||
|
openshift_raw:
|
||||||
|
definition:
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: mysecret
|
||||||
|
namespace: testing
|
||||||
|
type: Opaque
|
||||||
|
data:
|
||||||
|
username: "{{ 'admin' | b64encode }}"
|
||||||
|
password: "{{ 'foobard' | b64encode }}"
|
||||||
|
|
||||||
|
- name: Retrieve a Secret
|
||||||
|
openshift_raw:
|
||||||
|
api: v1
|
||||||
|
kind: Secret
|
||||||
|
name: mysecret
|
||||||
|
namespace: testing
|
||||||
|
register: mysecret
|
||||||
|
|
||||||
|
# Passing the object definition from a file
|
||||||
|
|
||||||
|
- name: Create a Deployment by reading the definition from a local file
|
||||||
|
openshift_raw:
|
||||||
|
state: present
|
||||||
|
src: /testing/deployment.yml
|
||||||
|
|
||||||
|
- name: Read definition file from the Ansible controller file system
|
||||||
|
openshift_raw:
|
||||||
|
state: present
|
||||||
|
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
|
||||||
|
|
||||||
|
- name: Read definition file from the Ansible controller file system after Jinja templating
|
||||||
|
openshift_raw:
|
||||||
|
state: present
|
||||||
|
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
result:
|
||||||
|
description:
|
||||||
|
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
|
||||||
|
returned: success
|
||||||
|
type: complex
|
||||||
|
contains:
|
||||||
|
api_version:
|
||||||
|
description: The versioned schema of this representation of an object.
|
||||||
|
returned: success
|
||||||
|
type: str
|
||||||
|
kind:
|
||||||
|
description: Represents the REST resource this object represents.
|
||||||
|
returned: success
|
||||||
|
type: str
|
||||||
|
metadata:
|
||||||
|
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
|
||||||
|
returned: success
|
||||||
|
type: complex
|
||||||
|
spec:
|
||||||
|
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
|
||||||
|
returned: success
|
||||||
|
type: complex
|
||||||
|
status:
|
||||||
|
description: Current status details for the object.
|
||||||
|
returned: success
|
||||||
|
type: complex
|
||||||
|
items:
|
||||||
|
description: Returned only when the I(kind) is a List type resource. Contains a set of objects.
|
||||||
|
returned: when resource is a List
|
||||||
|
type: list
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.module_utils.k8s.raw import OpenShiftRawModule
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
OpenShiftRawModule().execute_module()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
lib/ansible/modules/clustering/openshift/openshift_scale.py (new file, 127 lines)

@@ -0,0 +1,127 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# (c) 2018, Chris Houseknecht <@chouseknecht>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
module: openshift_scale

short_description: Set a new size for a Deployment Config, Deployment, Replica Set, Replication Controller, or Job.

version_added: "2.5"

author: "Chris Houseknecht (@chouseknecht)"

description:
  - Similar to the oc scale command. Use to set the number of replicas for a Deployment Config, Deployment,
    ReplicaSet, or Replication Controller, or the parallelism attribute of a Job. Supports check mode.

extends_documentation_fragment:
  - k8s_name_options
  - k8s_auth_options
  - k8s_resource_options
  - k8s_scale_options

requirements:
  - "python >= 2.7"
  - "openshift == 0.4.3"
  - "PyYAML >= 3.11"
'''

EXAMPLES = '''
- name: Scale deployment config up, and extend timeout
  openshift_scale:
    api_version: v1
    kind: DeploymentConfig
    name: elastic
    namespace: myproject
    replicas: 3
    wait_timeout: 60

- name: Scale deployment config down when current replicas match
  openshift_scale:
    api_version: v1
    kind: DeploymentConfig
    name: elastic
    namespace: myproject
    current_replicas: 3
    replicas: 2

- name: Increase job parallelism
  openshift_scale:
    api_version: batch/v1
    kind: job
    name: pi-with-timeout
    namespace: testing
    replicas: 2

# Match object using local file or inline definition

- name: Scale deployment based on a file from the local filesystem
  openshift_scale:
    src: /myproject/elastic_deployment.yml
    replicas: 3
    wait: no

- name: Scale deployment based on a template output
  openshift_scale:
    resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}"
    replicas: 3
    wait: no

- name: Scale deployment based on a file from the Ansible controller filesystem
  openshift_scale:
    resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
    replicas: 3
    wait: no
'''

RETURN = '''
result:
  description:
    - If a change was made, will return the patched object, otherwise returns the existing object.
  returned: success
  type: complex
  contains:
    api_version:
      description: The versioned schema of this representation of an object.
      returned: success
      type: str
    kind:
      description: Represents the REST resource this object represents.
      returned: success
      type: str
    metadata:
      description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
      returned: success
      type: complex
    spec:
      description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
      returned: success
      type: complex
    status:
      description: Current status details for the object.
      returned: success
      type: complex
'''

from ansible.module_utils.k8s.scale import OpenShiftAnsibleScaleModule


def main():
    OpenShiftAnsibleScaleModule().execute_module()


if __name__ == '__main__':
    main()
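The restored module delegates all of the work to OpenShiftAnsibleScaleModule.execute_module(). As a rough mental model of what a scale operation has to decide, here is an illustrative sketch of the decision logic only; it is not the helper's actual code, and build_scale_patch is a hypothetical name introduced for this example.

```python
# Illustrative sketch only: the real behaviour lives in
# ansible.module_utils.k8s.scale.OpenShiftAnsibleScaleModule.

def build_scale_patch(existing, replicas, current_replicas=None):
    """Return (changed, patch_body) for a scale request against an existing object dict."""
    kind = existing.get('kind', '')
    # Jobs scale via spec.parallelism; the other supported kinds scale via spec.replicas.
    field = 'parallelism' if kind.lower() == 'job' else 'replicas'
    actual = existing.get('spec', {}).get(field)

    # When current_replicas is given, only act if it matches what the API reports.
    if current_replicas is not None and actual != current_replicas:
        return False, None
    if actual == replicas:
        return False, None
    return True, {'spec': {field: replicas}}


if __name__ == '__main__':
    dc = {'kind': 'DeploymentConfig', 'spec': {'replicas': 3}}
    print(build_scale_patch(dc, replicas=2, current_replicas=3))  # (True, {'spec': {'replicas': 2}})
    print(build_scale_patch(dc, replicas=2, current_replicas=5))  # (False, None)
```

In check mode the module would report the `changed` flag without sending the patch; with `wait` enabled it would additionally poll the object until the observed replica count converges or `wait_timeout` expires.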
@@ -9,7 +9,6 @@ DOCUMENTATION = '''
 plugin_type: inventory
 authors:
   - Chris Houseknecht <@chouseknecht>
-  - Fabian von Feilitzsch <@fabianvf>

 short_description: Kubernetes (K8s) inventory source

@@ -77,7 +76,7 @@ DOCUMENTATION = '''
 requirements:
   - "python >= 2.7"
-  - "openshift >= 0.6"
+  - "openshift == 0.4.1"
   - "PyYAML >= 3.11"
 '''
@@ -77,7 +77,7 @@ DOCUMENTATION = '''
 requirements:
   - "python >= 2.7"
-  - "openshift >= 0.6"
+  - "openshift == 0.4.1"
   - "PyYAML >= 3.11"
 '''
@@ -1 +0,0 @@
-k8s.py
@@ -29,15 +29,11 @@ DOCUMENTATION = """
 description:
   - Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
-    namespace, or all matching objects for all namespaces, as well as information about the cluster.
+    namespace, or all matching objects for all namespaces.
   - Provides access to the full range of K8s APIs.
   - Enables authentication via config file, certificates, password or token.

 options:
-  cluster_info:
-    description:
-      - Use to specify the type of cluster information you are attempting to retrieve. Will take priority
-        over all the other options.
   api_version:
     description:
       - Use to specify the API version. If I(resource definition) is provided, the I(apiVersion) from the

@@ -119,7 +115,7 @@ DOCUMENTATION = """
 requirements:
   - "python >= 2.7"
-  - "openshift >= 0.6"
+  - "openshift == 0.4.1"
   - "PyYAML >= 3.11"

 notes:
@@ -193,95 +189,7 @@ RETURN = """
 """

 from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.k8s.lookup import KubernetesLookup

-import os
-
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.k8s.common import K8sAnsibleMixin
-
-try:
-    from openshift.dynamic import DynamicClient
-    from openshift.dynamic.exceptions import NotFoundError
-    HAS_K8S_MODULE_HELPER = True
-except ImportError as exc:
-    HAS_K8S_MODULE_HELPER = False
-
-try:
-    import yaml
-    HAS_YAML = True
-except ImportError:
-    HAS_YAML = False
-
-
-class KubernetesLookup(K8sAnsibleMixin):
-
-    def __init__(self):
-
-        if not HAS_K8S_MODULE_HELPER:
-            raise Exception(
-                "Requires the OpenShift Python client. Try `pip install openshift`"
-            )
-
-        if not HAS_YAML:
-            raise Exception(
-                "Requires PyYAML. Try `pip install PyYAML`"
-            )
-
-        self.kind = None
-        self.name = None
-        self.namespace = None
-        self.api_version = None
-        self.label_selector = None
-        self.field_selector = None
-        self.include_uninitialized = None
-        self.resource_definition = None
-        self.helper = None
-        self.connection = {}
-
-    def run(self, terms, variables=None, **kwargs):
-        self.params = kwargs
-        self.client = self.get_api_client()
-
-        cluster_info = kwargs.get('cluster_info')
-        if cluster_info == 'version':
-            return [self.client.version]
-        if cluster_info == 'api_groups':
-            return [self.client.resources.api_groups]
-
-        self.kind = kwargs.get('kind')
-        self.name = kwargs.get('resource_name')
-        self.namespace = kwargs.get('namespace')
-        self.api_version = kwargs.get('api_version', 'v1')
-        self.label_selector = kwargs.get('label_selector')
-        self.field_selector = kwargs.get('field_selector')
-        self.include_uninitialized = kwargs.get('include_uninitialized', False)
-
-        resource_definition = kwargs.get('resource_definition')
-        src = kwargs.get('src')
-        if src:
-            resource_definition = self.load_resource_definitions(src)[0]
-        if resource_definition:
-            self.kind = resource_definition.get('kind', self.kind)
-            self.api_version = resource_definition.get('apiVersion', self.api_version)
-            self.name = resource_definition.get('metadata', {}).get('name', self.name)
-            self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)
-
-        if not self.kind:
-            raise Exception(
-                "Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
-                "using the 'resource_definition' parameter."
-            )
-
-        resource = self.client.resources.get(kind=self.kind, api_version=self.api_version)
-        try:
-            k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
-        except NotFoundError:
-            return []
-
-        if self.name:
-            return [k8s_obj.to_dict()]
-
-        return k8s_obj.to_dict().get('items')
-
-
 class LookupModule(LookupBase):
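The hunk above removes the DynamicClient-based lookup class. For readers who want to see the same query pattern outside of Ansible, here is a standalone sketch built on the same openshift.dynamic calls the deleted code used; it assumes a reachable cluster and a default kubeconfig, and the kind, namespace, and label selector values are placeholders.

```python
# Standalone sketch of the query pattern used by the removed KubernetesLookup class.
import kubernetes
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import NotFoundError

k8s_client = kubernetes.config.new_client_from_config()  # reads ~/.kube/config by default
client = DynamicClient(k8s_client)

# The removed cluster_info option exposed these two attributes directly:
print(client.version)

resource = client.resources.get(api_version='v1', kind='Pod')
try:
    result = resource.get(namespace='default', label_selector='app=example')
    items = result.to_dict().get('items', [])
except NotFoundError:
    items = []

print([item['metadata']['name'] for item in items])
```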
lib/ansible/plugins/lookup/openshift.py (new file, 197 lines)

@@ -0,0 +1,197 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = """
    lookup: openshift

    version_added: "2.5"

    short_description: Query the OpenShift API

    description:
      - Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
        namespace, or all matching objects for all namespaces.
      - Provides access to the full range of K8s APIs.
      - Enables authentication via config file, certificates, password or token.

    options:
      api_version:
        description:
          - Use to specify the API version. If I(resource definition) is provided, the I(apiVersion) from the
            I(resource_definition) will override this option.
        default: v1
      kind:
        description:
          - Use to specify an object model. If I(resource definition) is provided, the I(kind) from a
            I(resource_definition) will override this option.
        required: true
      resource_name:
        description:
          - Fetch a specific object by name. If I(resource definition) is provided, the I(metadata.name) value
            from the I(resource_definition) will override this option.
      namespace:
        description:
          - Limit the objects returned to a specific namespace. If I(resource definition) is provided, the
            I(metadata.namespace) value from the I(resource_definition) will override this option.
      label_selector:
        description:
          - Additional labels to include in the query. Ignored when I(resource_name) is provided.
      field_selector:
        description:
          - Specific fields on which to query. Ignored when I(resource_name) is provided.
      resource_definition:
        description:
          - "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name), I(namespace),
            and I(resource_version) will be overwritten by corresponding values found in the provided
            I(resource_definition)."
      src:
        description:
          - "Provide a path to a file containing a valid YAML definition of an object. Mutually
            exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
            will be overwritten by corresponding values found in the configuration read in from the I(src) file."
          - Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
            plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
            I(resource_definition). See Examples below.
      host:
        description:
          - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
      api_key:
        description:
          - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
      kubeconfig:
        description:
          - Path to an existing Kubernetes config file. If not provided, and no other connection
            options are provided, the openshift client will attempt to load the default
            configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
            variable.
      context:
        description:
          - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
            variable.
      username:
        description:
          - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
            variable.
      password:
        description:
          - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
            variable.
      cert_file:
        description:
          - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
            environment variable.
      key_file:
        description:
          - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
            environment variable.
      ssl_ca_cert:
        description:
          - Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
            environment variable.
      verify_ssl:
        description:
          - Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
            environment variable.
        type: bool

    requirements:
      - "python >= 2.7"
      - "openshift == 0.4.1"
      - "PyYAML >= 3.11"

    notes:
      - "The OpenShift Python client wraps the K8s Python client, providing full access to
        all of the APIs and models available on both platforms. For API version details and
        additional information visit https://github.com/openshift/openshift-restclient-python"
"""

EXAMPLES = """
- name: Fetch a list of projects
  set_fact:
    projects: "{{ lookup('openshift', api_version='v1', kind='Project') }}"

- name: Fetch all deployments
  set_fact:
    deployments: "{{ lookup('openshift', kind='DeploymentConfig') }}"

- name: Fetch all deployments in a namespace
  set_fact:
    deployments: "{{ lookup('openshift', kind='DeploymentConfig', namespace='testing') }}"

- name: Fetch a specific deployment by name
  set_fact:
    deployments: "{{ lookup('openshift', kind='DeploymentConfig', namespace='testing', resource_name='elastic') }}"

- name: Fetch with label selector
  set_fact:
    service: "{{ lookup('openshift', kind='Service', label_selector='app=galaxy') }}"

# Use parameters from a YAML config

- name: Load config from the Ansible controller filesystem
  set_fact:
    config: "{{ lookup('file', 'service.yml') | from_yaml }}"

- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
  set_fact:
    service: "{{ lookup('openshift', resource_definition=config) }}"

- name: Use a config from the local filesystem
  set_fact:
    service: "{{ lookup('openshift', src='service.yml') }}"
"""

RETURN = """
  _list:
    description:
      - One or more object definitions returned from the API.
    type: complex
    contains:
      api_version:
        description: The versioned schema of this representation of an object.
        returned: success
        type: str
      kind:
        description: Represents the REST resource this object represents.
        returned: success
        type: str
      metadata:
        description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
        returned: success
        type: complex
      spec:
        description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
        returned: success
        type: complex
      status:
        description: Current status details for the object.
        returned: success
        type: complex
"""

from ansible.plugins.lookup import LookupBase
from ansible.module_utils.k8s.lookup import OpenShiftLookup


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        return OpenShiftLookup().run(terms, variables=variables, **kwargs)
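The option notes above describe how values found in I(resource_definition), or in the file behind I(src), override the discrete kind, api_version, resource_name, and namespace options. A small hypothetical helper that mirrors those documented precedence rules follows; the name resolve_target and its signature are illustrative only and are not part of the plugin.

```python
# Hypothetical helper mirroring the documented override rules: values found in
# resource_definition (or the file behind src) win over the discrete options.
import yaml


def resolve_target(kind=None, api_version='v1', resource_name=None, namespace=None,
                   resource_definition=None, src=None):
    if src and resource_definition is None:
        with open(src) as f:
            resource_definition = yaml.safe_load(f)
    if resource_definition:
        kind = resource_definition.get('kind', kind)
        api_version = resource_definition.get('apiVersion', api_version)
        metadata = resource_definition.get('metadata', {})
        resource_name = metadata.get('name', resource_name)
        namespace = metadata.get('namespace', namespace)
    if not kind:
        raise ValueError("no kind specified: pass kind= or a resource_definition/src")
    return kind, api_version, resource_name, namespace


print(resolve_target(resource_definition={'kind': 'DeploymentConfig',
                                          'apiVersion': 'v1',
                                          'metadata': {'name': 'elastic',
                                                       'namespace': 'myproject'}}))
```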