Update bare exceptions to specify Exception.
This will keep us from accidentally catching program-exiting exceptions like KeyboardInterrupt and SystemExit.
This commit is contained in:
parent
5147e792d3
commit
3fba006207
320 changed files with 659 additions and 656 deletions
|
@ -185,7 +185,7 @@ class ConnectionProcess(object):
|
|||
self.sock.close()
|
||||
if self.connection:
|
||||
self.connection.close()
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
if os.path.exists(self.socket_path):
|
||||
|
|
|
@ -62,7 +62,7 @@ def api_get(link, config):
|
|||
result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
|
||||
url_password=config.get('auth', 'apipass').replace('\n', ''))
|
||||
return json.loads(result.read())
|
||||
except:
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
|
@ -99,7 +99,7 @@ def cache_available(config):
|
|||
|
||||
try:
|
||||
existing = os.stat('/'.join([dpath, 'inventory']))
|
||||
except:
|
||||
except Exception:
|
||||
# cache doesn't exist or isn't accessible
|
||||
return False
|
||||
|
||||
|
|
|
@ -463,38 +463,38 @@ class AosInventory(object):
|
|||
# Try to reach all parameters from File, if not available try from ENV
|
||||
try:
|
||||
self.aos_server = config.get('aos', 'aos_server')
|
||||
except:
|
||||
except Exception:
|
||||
if 'AOS_SERVER' in os.environ.keys():
|
||||
self.aos_server = os.environ['AOS_SERVER']
|
||||
|
||||
try:
|
||||
self.aos_server_port = config.get('aos', 'port')
|
||||
except:
|
||||
except Exception:
|
||||
if 'AOS_PORT' in os.environ.keys():
|
||||
self.aos_server_port = os.environ['AOS_PORT']
|
||||
|
||||
try:
|
||||
self.aos_username = config.get('aos', 'username')
|
||||
except:
|
||||
except Exception:
|
||||
if 'AOS_USERNAME' in os.environ.keys():
|
||||
self.aos_username = os.environ['AOS_USERNAME']
|
||||
|
||||
try:
|
||||
self.aos_password = config.get('aos', 'password')
|
||||
except:
|
||||
except Exception:
|
||||
if 'AOS_PASSWORD' in os.environ.keys():
|
||||
self.aos_password = os.environ['AOS_PASSWORD']
|
||||
|
||||
try:
|
||||
self.aos_blueprint = config.get('aos', 'blueprint')
|
||||
except:
|
||||
except Exception:
|
||||
if 'AOS_BLUEPRINT' in os.environ.keys():
|
||||
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
|
||||
|
||||
try:
|
||||
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
|
||||
self.aos_blueprint_int = False
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def parse_cli_args(self):
|
||||
|
|
|
@ -397,7 +397,7 @@ class AzureRM(object):
|
|||
for key in AZURE_CREDENTIAL_ENV_MAPPING:
|
||||
try:
|
||||
credentials[key] = config.get(profile, key, raw=True)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
|
||||
|
@ -921,7 +921,7 @@ class AzureInventory(object):
|
|||
try:
|
||||
config = cp.ConfigParser()
|
||||
config.read(path)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if config is not None:
|
||||
|
@ -929,7 +929,7 @@ class AzureInventory(object):
|
|||
for key in AZURE_CONFIG_SETTINGS:
|
||||
try:
|
||||
settings[key] = config.get('azure', key, raw=True)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return settings
|
||||
|
|
|
@ -88,7 +88,7 @@ import json
|
|||
|
||||
try:
|
||||
import libbrook
|
||||
except:
|
||||
except Exception:
|
||||
sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')
|
||||
|
||||
|
||||
|
|
|
@ -208,7 +208,7 @@ class CollinsInventory(object):
|
|||
break
|
||||
cur_page += 1
|
||||
num_retries = 0
|
||||
except:
|
||||
except Exception:
|
||||
self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
|
||||
num_retries += 1
|
||||
return assets
|
||||
|
@ -277,7 +277,7 @@ class CollinsInventory(object):
|
|||
# Locates all server assets from Collins.
|
||||
try:
|
||||
server_assets = self.find_assets()
|
||||
except:
|
||||
except Exception:
|
||||
self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
|
||||
return False
|
||||
|
||||
|
@ -288,7 +288,7 @@ class CollinsInventory(object):
|
|||
ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
|
||||
try:
|
||||
ip_index = int(ip_index)
|
||||
except:
|
||||
except Exception:
|
||||
self.log.error(
|
||||
"ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
|
||||
ip_index)
|
||||
|
@ -350,7 +350,7 @@ class CollinsInventory(object):
|
|||
try:
|
||||
self.write_to_cache(self.cache, self.cache_path_cache)
|
||||
self.write_to_cache(self.inventory, self.cache_path_inventory)
|
||||
except:
|
||||
except Exception:
|
||||
self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
|
||||
return False
|
||||
return True
|
||||
|
@ -388,7 +388,7 @@ class CollinsInventory(object):
|
|||
json_inventory = cache.read()
|
||||
self.inventory = json.loads(json_inventory)
|
||||
return True
|
||||
except:
|
||||
except Exception:
|
||||
self.log.error("Error while loading inventory:\n%s",
|
||||
traceback.format_exc())
|
||||
self.inventory = {}
|
||||
|
@ -402,7 +402,7 @@ class CollinsInventory(object):
|
|||
json_cache = cache.read()
|
||||
self.cache = json.loads(json_cache)
|
||||
return True
|
||||
except:
|
||||
except Exception:
|
||||
self.log.error("Error while loading host cache:\n%s",
|
||||
traceback.format_exc())
|
||||
self.cache = {}
|
||||
|
|
|
@ -335,7 +335,7 @@ class ConsulInventory(object):
|
|||
metadata = json.loads(metadata['Value'])
|
||||
for k, v in metadata.items():
|
||||
self.add_metadata(node_data, k, v)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def load_groups_from_kv(self, node_data):
|
||||
|
|
|
@ -364,7 +364,7 @@ from collections import defaultdict
|
|||
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
|
||||
try:
|
||||
del sys.path[sys.path.index(path)]
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
HAS_DOCKER_PY = True
|
||||
|
|
|
@ -107,7 +107,7 @@ try:
|
|||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.providers import get_driver
|
||||
_ = Provider.GCE
|
||||
except:
|
||||
except Exception:
|
||||
sys.exit("GCE inventory script requires libcloud >= 0.13")
|
||||
|
||||
|
||||
|
@ -289,7 +289,7 @@ class GceInventory(object):
|
|||
args = list(secrets.GCE_PARAMS)
|
||||
kwargs = secrets.GCE_KEYWORD_PARAMS
|
||||
secrets_found = True
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not secrets_found and secrets_path:
|
||||
|
@ -303,7 +303,7 @@ class GceInventory(object):
|
|||
args = list(getattr(secrets, 'GCE_PARAMS', []))
|
||||
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
|
||||
secrets_found = True
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not secrets_found:
|
||||
|
|
|
@ -88,7 +88,7 @@ try:
|
|||
from chube import api as chube_api
|
||||
from chube.datacenter import Datacenter
|
||||
from chube.linode_obj import Linode
|
||||
except:
|
||||
except Exception:
|
||||
try:
|
||||
# remove local paths and other stuff that may
|
||||
# cause an import conflict, as chube is sensitive
|
||||
|
|
|
@ -29,7 +29,7 @@ import sys
|
|||
import json
|
||||
try:
|
||||
import configparser
|
||||
except:
|
||||
except Exception:
|
||||
from six.moves import configparser
|
||||
|
||||
# Set up defaults
|
||||
|
|
|
@ -254,7 +254,7 @@ class NSoTInventory(object):
|
|||
obj[group]['vars'] = hostvars
|
||||
try:
|
||||
assert isinstance(query, string_types)
|
||||
except:
|
||||
except Exception:
|
||||
sys.exit('ERR: Group queries must be a single string\n'
|
||||
' Group: %s\n'
|
||||
' Query: %s\n' % (group, query)
|
||||
|
|
|
@ -69,7 +69,7 @@ def parse_args():
|
|||
try:
|
||||
# check if rackhd url(ie:10.1.1.45:8080) is specified in the environment
|
||||
RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
|
||||
except:
|
||||
except Exception:
|
||||
# use default values
|
||||
pass
|
||||
|
||||
|
@ -81,7 +81,7 @@ if (parse_args().host):
|
|||
try:
|
||||
nodeids += parse_args().host.split(',')
|
||||
RackhdInventory(nodeids)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
if (parse_args().list):
|
||||
try:
|
||||
|
@ -92,5 +92,5 @@ if (parse_args().list):
|
|||
if entry['type'] == 'compute':
|
||||
nodeids.append(entry['id'])
|
||||
RackhdInventory(nodeids)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
|
|
@ -242,7 +242,7 @@ def _list_into_cache(regions):
|
|||
# pylint: disable=unexpected-keyword-arg
|
||||
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
|
||||
'RAX_ACCESS_IP_VERSION', 4, islist=True))
|
||||
except:
|
||||
except Exception:
|
||||
ip_versions = [4]
|
||||
else:
|
||||
ip_versions = [v for v in ip_versions if v in [4, 6]]
|
||||
|
|
|
@ -258,7 +258,7 @@ class RudderInventory(object):
|
|||
|
||||
try:
|
||||
response, content = self.conn.request(target.geturl(), method, body, headers)
|
||||
except:
|
||||
except Exception:
|
||||
self.fail_with_error('Error connecting to Rudder server')
|
||||
|
||||
try:
|
||||
|
|
|
@ -53,7 +53,7 @@ import json
|
|||
|
||||
try:
|
||||
import requests
|
||||
except:
|
||||
except Exception:
|
||||
sys.exit('requests package is required for this inventory script')
|
||||
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ def get_hosts(host=None):
|
|||
else:
|
||||
returned = {'all': set(), '_metadata': {}}
|
||||
p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
|
||||
except:
|
||||
except Exception:
|
||||
sys.exit(1)
|
||||
|
||||
hostvars = {}
|
||||
|
@ -50,7 +50,7 @@ def get_hosts(host=None):
|
|||
|
||||
try:
|
||||
k, v = line.split(':', 1)
|
||||
except:
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if k == '':
|
||||
|
@ -67,7 +67,7 @@ def get_hosts(host=None):
|
|||
if 'Value' in ipinfo:
|
||||
a, ip = ipinfo.split(':', 1)
|
||||
hostvars[curname]['ansible_ssh_host'] = ip.strip()
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
continue
|
||||
|
|
|
@ -45,7 +45,7 @@ except ImportError:
|
|||
|
||||
try:
|
||||
from zabbix_api import ZabbixAPI
|
||||
except:
|
||||
except Exception:
|
||||
print("Error: Zabbix API library must be installed: pip install zabbix-api.",
|
||||
file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
|
|
@ -305,7 +305,7 @@ class AzureRM(object):
|
|||
for key in AZURE_CREDENTIAL_ENV_MAPPING:
|
||||
try:
|
||||
credentials[key] = config.get(profile, key, raw=True)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
|
||||
|
@ -571,7 +571,7 @@ class AzureKeyVaultSecret:
|
|||
try:
|
||||
config = cp.ConfigParser()
|
||||
config.read(path)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if config is not None:
|
||||
|
@ -579,7 +579,7 @@ class AzureKeyVaultSecret:
|
|||
for key in AZURE_VAULT_SETTINGS:
|
||||
try:
|
||||
settings[key] = config.get('azure_keyvault', key, raw=True)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return settings
|
||||
|
|
|
@ -107,7 +107,7 @@ def opts_docs(cli_class_name, cli_module_name):
|
|||
# parse the common options
|
||||
try:
|
||||
cli.parse()
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# base/common cli info
|
||||
|
@ -154,7 +154,7 @@ def opts_docs(cli_class_name, cli_module_name):
|
|||
|
||||
try:
|
||||
cli.parse()
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# FIXME/TODO: needed?
|
||||
|
|
|
@ -221,7 +221,7 @@ def runtest(modfile, argspath, modname, module_style, interpreters):
|
|||
print(out)
|
||||
print(err)
|
||||
results = json.loads(out)
|
||||
except:
|
||||
except Exception:
|
||||
print("*" * 35)
|
||||
print("INVALID OUTPUT FORMAT")
|
||||
print(out)
|
||||
|
|
|
@ -62,7 +62,7 @@ ansible_facts = {}
|
|||
for fact in facts:
|
||||
try:
|
||||
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
|
||||
except:
|
||||
except Exception:
|
||||
ansible_facts[fact] = "N/A"
|
||||
|
||||
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
|
||||
|
|
|
@ -78,7 +78,7 @@ class ConfigCLI(CLI):
|
|||
raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
|
||||
|
||||
os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
|
||||
except:
|
||||
except Exception:
|
||||
if self.action in ['view']:
|
||||
raise
|
||||
elif self.action in ['edit', 'update']:
|
||||
|
|
|
@ -30,7 +30,7 @@ HAS_PYCRYPTO_ATFORK = False
|
|||
try:
|
||||
from Crypto.Random import atfork
|
||||
HAS_PYCRYPTO_ATFORK = True
|
||||
except:
|
||||
except Exception:
|
||||
# We only need to call atfork if pycrypto is used because it will need to
|
||||
# reinitialize its RNG. Since old paramiko could be using pycrypto, we
|
||||
# need to take charge of calling it.
|
||||
|
@ -153,7 +153,7 @@ class WorkerProcess(multiprocessing.Process):
|
|||
task_fields=self._task.dump_attrs(),
|
||||
)
|
||||
self._final_q.put(task_result, block=False)
|
||||
except:
|
||||
except Exception:
|
||||
display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
|
||||
display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
|
||||
|
||||
|
|
|
@ -183,7 +183,7 @@ class GalaxyAPI(object):
|
|||
role_name = parts[-1]
|
||||
if notify:
|
||||
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
|
||||
except:
|
||||
except Exception:
|
||||
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
|
||||
|
||||
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
|
||||
|
@ -210,7 +210,7 @@ class GalaxyAPI(object):
|
|||
results += data['results']
|
||||
done = (data.get('next_link', None) is None)
|
||||
return results
|
||||
except:
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@g_connect
|
||||
|
|
|
@ -60,12 +60,12 @@ class GalaxyLogin(object):
|
|||
|
||||
try:
|
||||
self.github_username = input("Github Username: ")
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
self.github_password = getpass.getpass("Password for %s: " % self.github_username)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not self.github_username or not self.github_password:
|
||||
|
|
|
@ -100,7 +100,7 @@ class GalaxyRole(object):
|
|||
try:
|
||||
f = open(meta_path, 'r')
|
||||
self._metadata = yaml.safe_load(f)
|
||||
except:
|
||||
except Exception:
|
||||
display.vvvvv("Unable to load metadata for %s" % self.name)
|
||||
return False
|
||||
finally:
|
||||
|
@ -120,7 +120,7 @@ class GalaxyRole(object):
|
|||
try:
|
||||
f = open(info_path, 'r')
|
||||
self._install_info = yaml.safe_load(f)
|
||||
except:
|
||||
except Exception:
|
||||
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
|
||||
return False
|
||||
finally:
|
||||
|
@ -144,7 +144,7 @@ class GalaxyRole(object):
|
|||
with open(info_path, 'w+') as f:
|
||||
try:
|
||||
self._install_info = yaml.safe_dump(info, f)
|
||||
except:
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
@ -159,7 +159,7 @@ class GalaxyRole(object):
|
|||
try:
|
||||
rmtree(self.path)
|
||||
return True
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
@ -285,7 +285,7 @@ class GalaxyRole(object):
|
|||
else:
|
||||
try:
|
||||
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
|
||||
except:
|
||||
except Exception:
|
||||
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
|
||||
|
||||
# we strip off any higher-level directories for all of the files contained within
|
||||
|
|
|
@ -116,7 +116,7 @@ def retry(retries=None, retry_pause=1):
|
|||
raise Exception("Retry limit exceeded: %d" % retries)
|
||||
try:
|
||||
ret = f(*args, **kwargs)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
if ret:
|
||||
break
|
||||
|
|
|
@ -1069,7 +1069,7 @@ class AzureRMAuth(object):
|
|||
for key in AZURE_CREDENTIAL_ENV_MAPPING:
|
||||
try:
|
||||
credentials[key] = config.get(profile, key, raw=True)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if credentials.get('subscription_id'):
|
||||
|
|
|
@ -553,7 +553,7 @@ def human_to_bytes(number, default_unit=None, isbits=False):
|
|||
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
|
||||
try:
|
||||
num = float(m.group(1))
|
||||
except:
|
||||
except Exception:
|
||||
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
|
||||
|
||||
unit = m.group(2)
|
||||
|
@ -566,7 +566,7 @@ def human_to_bytes(number, default_unit=None, isbits=False):
|
|||
range_key = unit[0].upper()
|
||||
try:
|
||||
limit = SIZE_RANGES[range_key]
|
||||
except:
|
||||
except Exception:
|
||||
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
|
||||
|
||||
# default value
|
||||
|
@ -1028,7 +1028,7 @@ class AnsibleModule(object):
|
|||
f = open('/proc/mounts', 'r')
|
||||
mount_data = f.readlines()
|
||||
f.close()
|
||||
except:
|
||||
except Exception:
|
||||
return (False, None)
|
||||
path_mount_point = self.find_mount_point(path)
|
||||
for line in mount_data:
|
||||
|
@ -1310,7 +1310,7 @@ class AnsibleModule(object):
|
|||
output['attr_flags'] = res[1].replace('-', '').strip()
|
||||
output['version'] = res[0].strip()
|
||||
output['attributes'] = format_attributes(output['attr_flags'])
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
return output
|
||||
|
||||
|
@ -1820,7 +1820,7 @@ class AnsibleModule(object):
|
|||
if value.startswith("{"):
|
||||
try:
|
||||
return json.loads(value)
|
||||
except:
|
||||
except Exception:
|
||||
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
|
||||
if exc is not None:
|
||||
raise TypeError('unable to evaluate string as dictionary')
|
||||
|
@ -2163,7 +2163,7 @@ class AnsibleModule(object):
|
|||
if not os.access(cwd, os.F_OK | os.R_OK):
|
||||
raise Exception()
|
||||
return cwd
|
||||
except:
|
||||
except Exception:
|
||||
# we don't have access to the cwd, probably because of sudo.
|
||||
# Try and move to a neutral location to prevent errors
|
||||
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
|
||||
|
@ -2171,7 +2171,7 @@ class AnsibleModule(object):
|
|||
if os.access(cwd, os.F_OK | os.R_OK):
|
||||
os.chdir(cwd)
|
||||
return cwd
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
# we won't error here, as it may *not* be a problem,
|
||||
# and we don't want to break modules unnecessarily
|
||||
|
|
|
@ -28,7 +28,7 @@ def get_distribution():
|
|||
distribution = 'Amazon'
|
||||
else:
|
||||
distribution = 'OtherLinux'
|
||||
except:
|
||||
except Exception:
|
||||
# FIXME: MethodMissing, I assume?
|
||||
distribution = platform.dist()[0].capitalize()
|
||||
return distribution
|
||||
|
|
|
@ -49,7 +49,7 @@ try:
|
|||
import boto3
|
||||
import botocore
|
||||
HAS_BOTO3 = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
try:
|
||||
|
|
|
@ -50,7 +50,7 @@ def api_wrapper(func):
|
|||
module.fail_json(msg=e.message)
|
||||
except core.exceptions.SystemNotFoundException as e:
|
||||
module.fail_json(msg=e.message)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
return __wrapper
|
||||
|
||||
|
|
|
@ -135,7 +135,7 @@ def not_in_host_file(self, host):
|
|||
hash.update(host)
|
||||
if hash.digest() == kn_host.decode('base64'):
|
||||
return False
|
||||
except:
|
||||
except Exception:
|
||||
# invalid hashed host key, skip it
|
||||
continue
|
||||
else:
|
||||
|
@ -164,7 +164,7 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
|
|||
if create_dir:
|
||||
try:
|
||||
os.makedirs(user_ssh_dir, int('700', 8))
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
|
||||
else:
|
||||
module.fail_json(msg="%s does not exist" % user_ssh_dir)
|
||||
|
|
|
@ -81,7 +81,7 @@ try:
|
|||
from solidfire.models import Schedule, ScheduleInfo
|
||||
|
||||
HAS_SF_SDK = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_SF_SDK = False
|
||||
|
||||
|
||||
|
@ -124,7 +124,7 @@ def create_sf_connection(module, port=None):
|
|||
try:
|
||||
return_val = ElementFactory.create(hostname, username, password, port=port)
|
||||
return return_val
|
||||
except:
|
||||
except Exception:
|
||||
raise Exception("Unable to create SF connection")
|
||||
else:
|
||||
module.fail_json(msg="the python SolidFire SDK module is required")
|
||||
|
@ -237,7 +237,7 @@ def request(url, data=None, headers=None, method='GET', use_proxy=True,
|
|||
data = json.loads(raw_data)
|
||||
else:
|
||||
raw_data = None
|
||||
except:
|
||||
except Exception:
|
||||
if ignore_errors:
|
||||
pass
|
||||
else:
|
||||
|
|
|
@ -6,7 +6,7 @@ try:
|
|||
import solidfire.common
|
||||
|
||||
HAS_SF_SDK = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_SF_SDK = False
|
||||
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ def axapi_call(module, url, post=None):
|
|||
data = {"response": {"status": "OK"}}
|
||||
else:
|
||||
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="could not read the result from the host")
|
||||
finally:
|
||||
rsp.close()
|
||||
|
@ -126,7 +126,7 @@ def axapi_call_v3(module, url, method=None, body=None, signature=None):
|
|||
data = {"response": {"status": "OK"}}
|
||||
else:
|
||||
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="could not read the result from the host")
|
||||
finally:
|
||||
rsp.close()
|
||||
|
|
|
@ -148,7 +148,7 @@ class ACIModule(object):
|
|||
return true
|
||||
elif bool_value is False:
|
||||
return false
|
||||
except:
|
||||
except Exception:
|
||||
# This provides backward compatibility to Ansible v2.4, deprecate in Ansible v2.8
|
||||
if value == true:
|
||||
self.module.deprecate("Boolean value '%s' is no longer valid, please use 'yes' as a boolean value." % value, '2.9')
|
||||
|
@ -164,7 +164,7 @@ class ACIModule(object):
|
|||
''' Return an ACI-compatible ISO8601 formatted time: 2123-12-12T00:00:00.000+00:00 '''
|
||||
try:
|
||||
return dt.isoformat(timespec='milliseconds')
|
||||
except:
|
||||
except Exception:
|
||||
tz = dt.strftime('%z')
|
||||
return '%s.%03d%s:%s' % (dt.strftime('%Y-%m-%dT%H:%M:%S'), dt.microsecond / 1000, tz[:3], tz[3:])
|
||||
|
||||
|
@ -231,7 +231,7 @@ class ACIModule(object):
|
|||
|
||||
try:
|
||||
sig_key = load_privatekey(FILETYPE_PEM, open(self.params['private_key'], 'r').read())
|
||||
except:
|
||||
except Exception:
|
||||
self.module.fail_json(msg='Cannot load private key %s' % self.params['private_key'])
|
||||
|
||||
# NOTE: ACI documentation incorrectly adds a space between method and path
|
||||
|
|
|
@ -183,7 +183,7 @@ class MSCModule(object):
|
|||
elif self.status >= 400:
|
||||
try:
|
||||
payload = json.loads(resp.read())
|
||||
except:
|
||||
except Exception:
|
||||
payload = json.loads(info['body'])
|
||||
if 'code' in payload:
|
||||
self.fail_json(msg='MSC Error {code}: {message}'.format(**payload), data=data, info=info, payload=payload)
|
||||
|
|
|
@ -147,7 +147,7 @@ def content_to_dict(module, content):
|
|||
if not content_dict:
|
||||
raise Exception()
|
||||
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")
|
||||
|
||||
# replace the string with the dict
|
||||
|
@ -163,7 +163,7 @@ def do_load_resource(module, collection, name):
|
|||
|
||||
try:
|
||||
item = find_collection_item(collection, name, '')
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="An error occurred while running 'find_collection_item'")
|
||||
|
||||
if item.exists:
|
||||
|
|
|
@ -38,7 +38,7 @@ try:
|
|||
from ansible.module_utils.network.cnos import cnos_errorcodes
|
||||
from ansible.module_utils.network.cnos import cnos_devicerules
|
||||
HAS_LIB = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_LIB = False
|
||||
from distutils.cmd import Command
|
||||
from ansible.module_utils._text import to_text
|
||||
|
@ -1372,7 +1372,7 @@ def enterEnableModeForDevice(enablePassword, timeout, obj):
|
|||
gotit = buff.find("#")
|
||||
if(gotit != -1):
|
||||
return retVal
|
||||
except:
|
||||
except Exception:
|
||||
retVal = retVal + "\n Error-101"
|
||||
flag = True
|
||||
if(retVal == ""):
|
||||
|
@ -1396,7 +1396,7 @@ def waitForDeviceResponse(command, prompt, timeout, obj):
|
|||
gotit = buff.find(prompt)
|
||||
if(gotit != -1):
|
||||
flag = True
|
||||
except:
|
||||
except Exception:
|
||||
# debugOutput(prompt)
|
||||
if prompt == "(yes/no)?":
|
||||
retVal = retVal
|
||||
|
|
|
@ -461,7 +461,7 @@ class Template:
|
|||
if value:
|
||||
try:
|
||||
return ast.literal_eval(value)
|
||||
except:
|
||||
except Exception:
|
||||
return str(value)
|
||||
else:
|
||||
return None
|
||||
|
|
|
@ -79,7 +79,7 @@ def backup(module, running_config):
|
|||
if not os.path.exists(backup_path):
|
||||
try:
|
||||
os.mkdir(backup_path)
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="Can't create directory {0} Permission denied ?".format(backup_path))
|
||||
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
|
||||
if 0 < len(backup_filename):
|
||||
|
@ -88,7 +88,7 @@ def backup(module, running_config):
|
|||
filename = '%s/%s_config.%s' % (backup_path, module.params['host'], tstamp)
|
||||
try:
|
||||
open(filename, 'w').write(running_config)
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="Can't create backup file {0} Permission denied ?".format(filename))
|
||||
|
||||
|
||||
|
|
|
@ -292,7 +292,7 @@ class MerakiModule(object):
|
|||
body=json.loads(to_native(info['body'])))
|
||||
try:
|
||||
return json.loads(to_native(resp.read()))
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def exit_json(self, **kwargs):
|
||||
|
|
|
@ -217,7 +217,7 @@ def rax_find_loadbalancer(module, rax_module, loadbalancer):
|
|||
clb = rax_module.cloud_loadbalancers
|
||||
try:
|
||||
found = clb.get(loadbalancer)
|
||||
except:
|
||||
except Exception:
|
||||
found = []
|
||||
for lb in clb.list():
|
||||
if loadbalancer == lb.name:
|
||||
|
|
|
@ -35,7 +35,7 @@ class RedfishUtils(object):
|
|||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error: %s" % e.reason}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except:
|
||||
except Exception:
|
||||
return {'ret': False, 'msg': "Unknown error"}
|
||||
return {'ret': True, 'data': data}
|
||||
|
||||
|
@ -53,7 +53,7 @@ class RedfishUtils(object):
|
|||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error: %s" % e.reason}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except:
|
||||
except Exception:
|
||||
return {'ret': False, 'msg': "Unknown error"}
|
||||
return {'ret': True, 'resp': resp}
|
||||
|
||||
|
@ -71,7 +71,7 @@ class RedfishUtils(object):
|
|||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error: %s" % e.reason}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except:
|
||||
except Exception:
|
||||
return {'ret': False, 'msg': "Unknown error"}
|
||||
return {'ret': True, 'resp': resp}
|
||||
|
||||
|
@ -89,7 +89,7 @@ class RedfishUtils(object):
|
|||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error: %s" % e.reason}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except:
|
||||
except Exception:
|
||||
return {'ret': False, 'msg': "Unknown error"}
|
||||
return {'ret': True, 'resp': resp}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
try:
|
||||
import ucsmsdk
|
||||
HAS_UCSMSDK = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_UCSMSDK = False
|
||||
|
||||
ucs_argument_spec = dict(
|
||||
|
|
|
@ -71,13 +71,13 @@ urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectH
|
|||
try:
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
|
||||
HAS_URLPARSE = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_URLPARSE = False
|
||||
|
||||
try:
|
||||
import ssl
|
||||
HAS_SSL = True
|
||||
except:
|
||||
except Exception:
|
||||
HAS_SSL = False
|
||||
|
||||
try:
|
||||
|
@ -436,7 +436,7 @@ def generic_urlparse(parts):
|
|||
generic_parts['password'] = password
|
||||
generic_parts['hostname'] = hostname
|
||||
generic_parts['port'] = port
|
||||
except:
|
||||
except Exception:
|
||||
generic_parts['username'] = None
|
||||
generic_parts['password'] = None
|
||||
generic_parts['hostname'] = parts[1]
|
||||
|
@ -673,7 +673,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
|
|||
(http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
|
||||
if int(resp_code) not in valid_codes:
|
||||
raise Exception
|
||||
except:
|
||||
except Exception:
|
||||
raise ProxyError('Connection to proxy failed')
|
||||
|
||||
def detect_no_proxy(self, url):
|
||||
|
@ -784,7 +784,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
|
|||
# cleanup the temp file created, don't worry
|
||||
# if it fails for some reason
|
||||
os.remove(tmp_ca_cert_path)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
|
@ -792,7 +792,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
|
|||
# if it fails for some reason
|
||||
if to_add_ca_cert_path:
|
||||
os.remove(to_add_ca_cert_path)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return req
|
||||
|
@ -1305,7 +1305,7 @@ def fetch_url(module, url, data=None, headers=None, method=None,
|
|||
try:
|
||||
# Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
|
||||
info.update(dict((k.lower(), v) for k, v in e.info().items()))
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
info.update({'msg': to_native(e), 'body': body, 'status': e.code})
|
||||
|
|
|
@ -153,7 +153,7 @@ from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_
|
|||
|
||||
try:
|
||||
from botocore.exceptions import BotoCoreError, ClientError
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
# handled by imported AnsibleAWSModule
|
||||
|
||||
|
|
|
@ -162,7 +162,7 @@ import time
|
|||
|
||||
try:
|
||||
import botocore
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
# handled by imported HAS_BOTO3
|
||||
|
||||
|
|
|
@ -201,7 +201,7 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clea
|
|||
kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string)
|
||||
# returns nothing, so we have to just assume it didn't throw
|
||||
ret['changed'] = True
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
ret['changes_needed'] = changes_needed
|
||||
|
|
|
@ -91,7 +91,7 @@ rules:
|
|||
|
||||
try:
|
||||
from botocore.exceptions import ClientError, BotoCoreError
|
||||
except:
|
||||
except Exception:
|
||||
# handled by HAS_BOTO3 check in main
|
||||
pass
|
||||
|
||||
|
|
|
@ -166,7 +166,7 @@ def create_update_parameter(client, module):
|
|||
|
||||
try:
|
||||
existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if existing_parameter:
|
||||
|
|
|
@ -477,7 +477,7 @@ def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
|
|||
try:
|
||||
stack = get_stack_facts(cfn, stack_name)
|
||||
existed.append('yes')
|
||||
except:
|
||||
except Exception:
|
||||
# If the stack previously existed, and now can't be found then it's
|
||||
# been deleted successfully.
|
||||
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
|
||||
|
|
|
@ -608,7 +608,7 @@ def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
|
|||
if isinstance(tags, str):
|
||||
try:
|
||||
tags = literal_eval(tags)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# if not a string type, convert and make sure it's a text string
|
||||
|
|
|
@ -1156,7 +1156,7 @@ def create_autoscaling_group(connection):
|
|||
else:
|
||||
try:
|
||||
ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
|
||||
except:
|
||||
except Exception:
|
||||
launch_template = as_group['LaunchTemplate']
|
||||
# Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
|
||||
ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
|
||||
|
|
|
@ -516,7 +516,7 @@ class ElbManager(object):
|
|||
def get_info(self):
|
||||
try:
|
||||
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
|
||||
except:
|
||||
except Exception:
|
||||
check_elb = None
|
||||
|
||||
if not check_elb:
|
||||
|
@ -528,11 +528,11 @@ class ElbManager(object):
|
|||
else:
|
||||
try:
|
||||
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
|
||||
except:
|
||||
except Exception:
|
||||
lb_cookie_policy = None
|
||||
try:
|
||||
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
|
||||
except:
|
||||
except Exception:
|
||||
app_cookie_policy = None
|
||||
|
||||
info = {
|
||||
|
|
|
@ -506,7 +506,7 @@ class Ec2Metadata(object):
|
|||
self._data['%s' % (new_uri)] = content
|
||||
for (key, value) in dict.items():
|
||||
self._data['%s:%s' % (new_uri, key.lower())] = value
|
||||
except:
|
||||
except Exception:
|
||||
self._data['%s' % (new_uri)] = content # not a stringifed JSON string
|
||||
|
||||
def fix_invalid_varnames(self, data):
|
||||
|
|
|
@ -124,7 +124,7 @@ from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dic
|
|||
|
||||
try:
|
||||
from botocore.exceptions import BotoCoreError, ClientError
|
||||
except:
|
||||
except Exception:
|
||||
pass # Handled by AnsibleAWSModule
|
||||
|
||||
|
||||
|
|
|
@ -306,7 +306,7 @@ def run(ecr, params, verbosity):
|
|||
ecr.set_repository_policy(
|
||||
registry_id, name, policy_text, force_set_policy)
|
||||
result['changed'] = True
|
||||
except:
|
||||
except Exception:
|
||||
# Some failure w/ the policy. It's helpful to know what the
|
||||
# policy is.
|
||||
result['policy'] = policy_text
|
||||
|
|
|
@ -512,7 +512,7 @@ class ElbManager(object):
|
|||
def get_info(self):
|
||||
try:
|
||||
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
|
||||
except:
|
||||
except Exception:
|
||||
check_elb = None
|
||||
|
||||
if not check_elb:
|
||||
|
@ -524,11 +524,11 @@ class ElbManager(object):
|
|||
else:
|
||||
try:
|
||||
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
|
||||
except:
|
||||
except Exception:
|
||||
lb_cookie_policy = None
|
||||
try:
|
||||
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
|
||||
except:
|
||||
except Exception:
|
||||
app_cookie_policy = None
|
||||
|
||||
info = {
|
||||
|
|
|
@ -134,7 +134,7 @@ from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
|
|||
|
||||
try:
|
||||
from botocore.exceptions import ClientError
|
||||
except:
|
||||
except Exception:
|
||||
pass # will be protected by AnsibleAWSModule
|
||||
|
||||
|
||||
|
|
|
@ -224,7 +224,7 @@ def set_queue_attribute(queue, attribute, value, check_mode=False):
|
|||
|
||||
try:
|
||||
existing_value = queue.get_attributes(attributes=attribute)[attribute]
|
||||
except:
|
||||
except Exception:
|
||||
existing_value = ''
|
||||
|
||||
# convert dict attributes to JSON strings (sort keys for comparing)
|
||||
|
|
|
@ -68,7 +68,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -121,7 +121,7 @@ appserviceplans:
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -74,7 +74,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -120,7 +120,7 @@ try:
|
|||
from azure.mgmt.cdn.models import ErrorResponseException
|
||||
from azure.common import AzureHttpError
|
||||
from azure.mgmt.cdn import CdnManagementClient
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -590,7 +590,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
|
|||
)
|
||||
for op in self._get_failed_nested_operations(operations)
|
||||
]
|
||||
except:
|
||||
except Exception:
|
||||
# If we fail here, the original error gets lost and user receives wrong error message/stacktrace
|
||||
pass
|
||||
self.log(dict(failed_deployment_operations=results), pretty_print=True)
|
||||
|
|
|
@ -98,7 +98,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -100,7 +100,7 @@ azure_functionapps:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -167,7 +167,7 @@ images:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -79,7 +79,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -108,7 +108,7 @@ azure_networkinterfaces:
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
@ -165,7 +165,7 @@ class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
|
|||
item = None
|
||||
try:
|
||||
item = self.network_client.network_interfaces.get(self.resource_group, self.name)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
|
|
|
@ -77,7 +77,7 @@ azure_publicipaddresses:
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -258,7 +258,7 @@ class AzureRMResource(AzureRMModuleBase):
|
|||
try:
|
||||
response = json.loads(original.text)
|
||||
needs_update = (dict_merge(response, self.body) != response)
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if needs_update:
|
||||
|
@ -266,7 +266,7 @@ class AzureRMResource(AzureRMModuleBase):
|
|||
if self.state == 'present':
|
||||
try:
|
||||
response = json.loads(response.text)
|
||||
except:
|
||||
except Exception:
|
||||
response = response.text
|
||||
else:
|
||||
response = None
|
||||
|
|
|
@ -198,7 +198,7 @@ class AzureRMResourceFacts(AzureRMModuleBase):
|
|||
self.results['response'] = response
|
||||
else:
|
||||
self.results['response'] = [response]
|
||||
except:
|
||||
except Exception:
|
||||
self.results['response'] = []
|
||||
|
||||
return self.results
|
||||
|
|
|
@ -77,7 +77,7 @@ azure_resourcegroups:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -98,7 +98,7 @@ routes:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ azure_securitygroups:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ azure_storageaccounts:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -141,7 +141,7 @@ from ansible.module_utils.common.dict_transformations import (
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -210,7 +210,7 @@ from ansible.module_utils.common.dict_transformations import _camel_to_snake
|
|||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ vms:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -240,7 +240,7 @@ import re
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
@ -308,7 +308,7 @@ class AzureRMVirtualMachineScaleSetFacts(AzureRMModuleBase):
|
|||
subnet_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
|
||||
['properties']['ipConfigurations'][0]['properties']['subnet']['id'])
|
||||
subnet_name = re.sub('.*subnets\\/', '', subnet_id)
|
||||
except:
|
||||
except Exception:
|
||||
self.log('Could not extract subnet name')
|
||||
|
||||
try:
|
||||
|
@ -316,13 +316,13 @@ class AzureRMVirtualMachineScaleSetFacts(AzureRMModuleBase):
|
|||
['properties']['ipConfigurations'][0]['properties']['loadBalancerBackendAddressPools'][0]['id'])
|
||||
load_balancer_name = re.sub('\\/backendAddressPools.*', '', re.sub('.*loadBalancers\\/', '', backend_address_pool_id))
|
||||
virtual_network_name = re.sub('.*virtualNetworks\\/', '', re.sub('\\/subnets.*', '', subnet_id))
|
||||
except:
|
||||
except Exception:
|
||||
self.log('Could not extract load balancer / virtual network name')
|
||||
|
||||
try:
|
||||
ssh_password_enabled = (not vmss['properties']['virtualMachineProfile']['osProfile'],
|
||||
['linuxConfiguration']['disablePasswordAuthentication'])
|
||||
except:
|
||||
except Exception:
|
||||
self.log('Could not extract SSH password enabled')
|
||||
|
||||
data_disks = vmss['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])
|
||||
|
|
|
@ -90,7 +90,7 @@ azure_vmimages:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ azure_virtualnetworks:
|
|||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -150,7 +150,7 @@ try:
|
|||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except:
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -165,7 +165,7 @@ class CloudStackFacts(object):
|
|||
try:
|
||||
# this data come form users, we try what we can to parse it...
|
||||
return yaml.safe_load(self._fetch(CS_USERDATA_BASE_URL))
|
||||
except:
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def _fetch(self, path):
|
||||
|
|
|
@ -890,7 +890,7 @@ try:
|
|||
else:
|
||||
from docker.utils.types import Ulimit, LogConfig
|
||||
from docker.errors import APIError, NotFound
|
||||
except Exception as dummy:
|
||||
except Exception:
|
||||
# missing docker-py handled in ansible.module_utils.docker
|
||||
pass
|
||||
|
||||
|
|
|
@ -263,7 +263,7 @@ try:
|
|||
from docker.errors import NotFound
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
from docker.types import IPAMPool, IPAMConfig
|
||||
except Exception as dummy:
|
||||
except Exception:
|
||||
# missing docker-py handled in ansible.module_utils.docker_common
|
||||
pass
|
||||
|
||||
|
|
|
@ -471,7 +471,7 @@ from ansible.module_utils._text import to_text
|
|||
try:
|
||||
from distutils.version import LooseVersion
|
||||
from docker import types
|
||||
except Exception as dummy:
|
||||
except Exception:
|
||||
# missing docker-py handled in ansible.module_utils.docker
|
||||
pass
|
||||
|
||||
|
@ -846,7 +846,7 @@ class DockerService(DockerBaseClass):
|
|||
network_id = None
|
||||
try:
|
||||
network_id = list(filter(lambda n: n['name'] == network_name, docker_networks))[0]['id']
|
||||
except Exception as dummy:
|
||||
except Exception:
|
||||
pass
|
||||
if network_id:
|
||||
networks.append({'Target': network_id})
|
||||
|
|
|
@ -330,11 +330,11 @@ def get_instance_info(inst):
|
|||
|
||||
try:
|
||||
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
|
||||
except:
|
||||
except Exception:
|
||||
netname = None
|
||||
try:
|
||||
subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
|
||||
except:
|
||||
except Exception:
|
||||
subnetname = None
|
||||
if 'disks' in inst.extra:
|
||||
disk_names = [disk_info['source'].split('/')[-1]
|
||||
|
|
|
@ -239,7 +239,7 @@ def main():
|
|||
zone, node_name = node.split('/')
|
||||
nodes.append(gce.ex_get_node(node_name, zone))
|
||||
output_nodes.append(node)
|
||||
except:
|
||||
except Exception:
|
||||
# skip nodes that are badly formatted or don't exist
|
||||
pass
|
||||
try:
|
||||
|
|
|
@ -188,7 +188,7 @@ def main():
|
|||
is_attached = True
|
||||
json_output['attached_mode'] = d['mode']
|
||||
json_output['attached_to_instance'] = inst.name
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# find disk if it already exists
|
||||
|
@ -210,7 +210,7 @@ def main():
|
|||
size_gb = int(round(float(size_gb)))
|
||||
if size_gb < 1:
|
||||
raise Exception
|
||||
except:
|
||||
except Exception:
|
||||
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
|
||||
changed=False)
|
||||
|
||||
|
|
|
@ -203,7 +203,7 @@ def _validate_params(params):
|
|||
try:
|
||||
check_params(params, fields)
|
||||
_validate_backend_params(params['backends'])
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
return (True, '')
|
||||
|
@ -233,7 +233,7 @@ def _validate_backend_params(backends):
|
|||
for backend in backends:
|
||||
try:
|
||||
check_params(backend, fields)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
if 'max_rate' in backend and 'max_rate_per_instance' in backend:
|
||||
|
|
|
@ -178,7 +178,7 @@ def get_global_forwarding_rule(client, name, project_id=None):
|
|||
req = client.globalForwardingRules().get(
|
||||
project=project_id, forwardingRule=name)
|
||||
return GCPUtils.execute_api_client_req(req, raise_404=False)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -204,7 +204,7 @@ def create_global_forwarding_rule(client, params, project_id):
|
|||
name=params['forwarding_rule_name'],
|
||||
project_id=project_id)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -229,7 +229,7 @@ def delete_global_forwarding_rule(client, name, project_id):
|
|||
project=project_id, forwardingRule=name)
|
||||
return_data = GCPUtils.execute_api_client_req(req, client)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -270,7 +270,7 @@ def update_global_forwarding_rule(client, forwarding_rule, params, name, project
|
|||
return_data = GCPUtils.execute_api_client_req(
|
||||
req, client=client, raw=False)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
|
|
@ -261,7 +261,7 @@ def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
|
|||
args = {'project': project_id, entity_name: name}
|
||||
req = resource.get(**args)
|
||||
return GCPUtils.execute_api_client_req(req, raise_404=False)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -289,7 +289,7 @@ def create_healthcheck(client, params, project_id, resource_type='HTTP'):
|
|||
name=params['healthcheck_name'],
|
||||
project_id=project_id)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -315,7 +315,7 @@ def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
|
|||
req = resource.delete(**args)
|
||||
return_data = GCPUtils.execute_api_client_req(req, client)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -356,7 +356,7 @@ def update_healthcheck(client, healthcheck, params, name, project_id,
|
|||
return_data = GCPUtils.execute_api_client_req(
|
||||
req, client=client, raw=False)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
|
|
@ -161,7 +161,7 @@ def create_target_http_proxy(client, params, project_id):
|
|||
name=params['target_proxy_name'],
|
||||
project_id=project_id)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -186,7 +186,7 @@ def delete_target_http_proxy(client, name, project_id):
|
|||
project=project_id, targetHttpProxy=name)
|
||||
return_data = GCPUtils.execute_api_client_req(req, client)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
@ -227,7 +227,7 @@ def update_target_http_proxy(client, target_proxy, params, name, project_id):
|
|||
return_data = GCPUtils.execute_api_client_req(
|
||||
req, client=client, raw=False)
|
||||
return (True, return_data)
|
||||
except:
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue