* nxos_vpc: pkl_vrf fixes for #57069

  Fixes #57069

  - Symptom: When playbooks specify `pkl_vrf: default`, the cli does not set the `vrf` state.
  - Analysis:
    - First issue: 'default' is a reserved word when used with the `peer-keepalive` `vrf` keyword; it refers to the default rib. This is confusing in several ways because `peer-keepalive`'s *default* vrf is the `management` vrf.
    - Second issue: When changing only one optional value (`pkl_vrf`) while the other optional values (e.g. `pkl_src`) are idempotent, the idempotent values are left out of the change set; unfortunately the device cli *replaces* the entire `peer-keepalive` command, so those values end up being removed. E.g. the playbook specifies this:

      ```
      { pkl_dest: 10.1.1.1, pkl_src: 10.2.2.2, pkl_vrf: my_vrf }
      ```

      ```
      peer-keepalive dest 10.1.1.1 src 10.2.2.2              # original
      peer-keepalive dest 10.1.1.1 src 10.2.2.2 vrf my_vrf   # intended result
      peer-keepalive dest 10.1.1.1 vrf my_vrf                # actual result
      ```

    - Third issue: the `pkl` getter relied on positional data. This broke when the `udp` keyword nvgen'd where `vrf` used to appear (shifting all keywords to the right). A standalone sketch of this follows the commit metadata below.
  - Tested on regression platforms: `N3K, N6K, N7K, N9K, N3K-F, N9K-F`

* PEP fixes
* PEP fix 2
* pkl should merge by default, not override
* rmv debugs
* add mike's tests
* fix comments
parent 760dc19284
commit 0868fec8bd

7 changed files with 219 additions and 13 deletions
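To make the third issue concrete, here is a minimal standalone sketch of the parsing change. The regex is the one added in the `get_vpc` hunk below; the sample nvgen line and the `udp-port 3200` keyword form are assumptions for illustration only.

```python
import re

# Sample 'peer-keepalive' nvgen line; the exact udp keyword rendering is assumed.
line = ('peer-keepalive destination 10.1.1.1 source 10.2.2.2 '
        'udp-port 3200 vrf my_vrf')

# Old, positional approach: index 6 used to be the vrf name, but the extra
# keyword shifts everything to the right.
print(line.split()[6])          # -> '3200', not 'my_vrf'

# New approach with named groups (same pattern as the hunk below): position
# no longer matters, and missing optional keywords simply produce None.
m = re.search(r'destination (?P<pkl_dest>[\d.]+)'
              r'(?:.* source (?P<pkl_src>[\d.]+))*'
              r'(?:.* vrf (?P<pkl_vrf>\S+))*',
              line)
vpc = {k: v for k, v in m.groupdict().items() if v}
print(vpc)  # -> {'pkl_dest': '10.1.1.1', 'pkl_src': '10.2.2.2', 'pkl_vrf': 'my_vrf'}
```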
```
@@ -58,9 +58,12 @@ options:
  pkl_dest:
    description:
      - Destination (remote) IP address used for peer keepalive link
      - pkl_dest is required whenever pkl options are used.
  pkl_vrf:
    description:
      - VRF used for peer keepalive link
      - The VRF must exist on the device before using pkl_vrf.
      - "(Note) 'default' is an overloaded term: Default vrf context for pkl_vrf is 'management'; 'pkl_vrf: default' refers to the literal 'default' rib."
    default: management
  peer_gw:
    description:
```
```
@@ -262,19 +265,46 @@ def get_vpc(module):
        if 'peer-gateway' in each:
            vpc['peer_gw'] = False if 'no ' in each else True
        if 'peer-keepalive destination' in each:
            line = each.split()
            vpc['pkl_dest'] = line[2]
            vpc['pkl_vrf'] = 'management'
            if 'source' in each:
                vpc['pkl_src'] = line[4]
                if 'vrf' in each:
                    vpc['pkl_vrf'] = line[6]
            else:
                if 'vrf' in each:
                    vpc['pkl_vrf'] = line[4]
            # destination is reqd; src & vrf are optional
            m = re.search(r'destination (?P<pkl_dest>[\d.]+)'
                          r'(?:.* source (?P<pkl_src>[\d.]+))*'
                          r'(?:.* vrf (?P<pkl_vrf>\S+))*',
                          each)
            if m:
                for pkl in m.groupdict().keys():
                    if m.group(pkl):
                        vpc[pkl] = m.group(pkl)
    return vpc


def pkl_dependencies(module, delta, existing):
    """peer-keepalive dependency checking.
    1. 'destination' is required with all pkl configs.
    2. If delta has optional pkl keywords present, then all optional pkl
       keywords in existing must be added to delta, otherwise the device cli
       will remove those values when the new config string is issued.
    3. The desired behavior for this set of properties is to merge changes;
       therefore if an optional pkl property exists on the device but not
       in the playbook, then that existing property should be retained.
    Example:
      CLI: peer-keepalive dest 10.1.1.1 source 10.1.1.2 vrf orange
      Playbook: {pkl_dest: 10.1.1.1, pkl_vrf: blue}
      Result: peer-keepalive dest 10.1.1.1 source 10.1.1.2 vrf blue
    """
    pkl_existing = [i for i in existing.keys() if i.startswith('pkl')]
    for pkl in pkl_existing:
        param = module.params.get(pkl)
        if not delta.get(pkl):
            if param and param == existing[pkl]:
                # delta is missing this param because it's idempotent;
                # however another pkl command has changed; therefore
                # explicitly add it to delta so that the cli retains it.
                delta[pkl] = existing[pkl]
            elif param is None and existing[pkl]:
                # retain existing pkl commands even if not in playbook
                delta[pkl] = existing[pkl]


def get_commands_to_config_vpc(module, vpc, domain, existing):
    vpc = dict(vpc)
```
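The `pkl_dependencies` docstring example can be reproduced with a small standalone sketch; `params` below stands in for `module.params`, and the literal values are taken from the docstring rather than from a device.

```python
# Merge-not-replace behavior from the pkl_dependencies docstring example above.
existing = {'pkl_dest': '10.1.1.1', 'pkl_src': '10.1.1.2', 'pkl_vrf': 'orange'}
params = {'pkl_dest': '10.1.1.1', 'pkl_src': None, 'pkl_vrf': 'blue'}  # playbook
delta = {'pkl_vrf': 'blue'}  # only the vrf actually changed

for pkl in [k for k in existing if k.startswith('pkl')]:
    param = params.get(pkl)
    if not delta.get(pkl):
        if param and param == existing[pkl]:
            # idempotent, but another pkl keyword changed: re-send it anyway
            delta[pkl] = existing[pkl]
        elif param is None and existing[pkl]:
            # present on the device only: keep it (merge, don't replace)
            delta[pkl] = existing[pkl]

print(delta)
# -> {'pkl_vrf': 'blue', 'pkl_dest': '10.1.1.1', 'pkl_src': '10.1.1.2'}
```

Feeding that merged delta to `get_commands_to_config_vpc` produces the docstring's "Result" line instead of a command that silently drops the source address.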
```
@@ -285,7 +315,7 @@ def get_commands_to_config_vpc(module, vpc, domain, existing):
        pkl_command = 'peer-keepalive destination {pkl_dest}'.format(**vpc)
        if 'pkl_src' in vpc:
            pkl_command += ' source {pkl_src}'.format(**vpc)
        if 'pkl_vrf' in vpc and vpc['pkl_vrf'] != 'management':
        if 'pkl_vrf' in vpc:
            pkl_command += ' vrf {pkl_vrf}'.format(**vpc)
        commands.append(pkl_command)
```
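With the `!= 'management'` guard removed, the vrf keyword is rendered whenever `pkl_vrf` is present in the delta, including an explicit `vrf management` (or `vrf default`), which is what the new unit tests below expect. A minimal sketch of the string building, with illustrative values carried over from the previous sketch:

```python
vpc = {'pkl_dest': '10.1.1.1', 'pkl_src': '10.1.1.2', 'pkl_vrf': 'blue'}

# Same construction as in the hunk above, pulled out of the module for clarity.
pkl_command = 'peer-keepalive destination {pkl_dest}'.format(**vpc)
if 'pkl_src' in vpc:
    pkl_command += ' source {pkl_src}'.format(**vpc)
if 'pkl_vrf' in vpc:
    pkl_command += ' vrf {pkl_vrf}'.format(**vpc)

print(pkl_command)
# -> peer-keepalive destination 10.1.1.1 source 10.1.1.2 vrf blue
```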
```
@@ -389,11 +419,14 @@ def main():
    if state == 'present':
        delta = {}
        for key, value in proposed.items():
            if str(value).lower() == 'default':
            if str(value).lower() == 'default' and key != 'pkl_vrf':
                # 'default' is a reserved word for vrf
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
            if existing.get(key) != value:
                delta[key] = value

        if delta:
            pkl_dependencies(module, delta, existing)
            command = get_commands_to_config_vpc(module, delta, domain, existing)
            commands.append(command)
    elif state == 'absent':
```
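The `key != 'pkl_vrf'` check is what keeps `pkl_vrf: default` literal. A hedged sketch of just that branch; `PARAM_TO_DEFAULT_KEYMAP` here is a stand-in subset, not the module's full mapping, and the `auto_recovery` default value is illustrative only.

```python
PARAM_TO_DEFAULT_KEYMAP = {'auto_recovery': True}  # stand-in subset, values illustrative

def resolve(key, value):
    # 'default' normally means "reset to the module default", except for
    # pkl_vrf, where 'default' names a real vrf (the default rib).
    if str(value).lower() == 'default' and key != 'pkl_vrf':
        value = PARAM_TO_DEFAULT_KEYMAP.get(key)
    return value

print(resolve('auto_recovery', 'default'))  # -> True (mapped to the module default)
print(resolve('pkl_vrf', 'default'))        # -> 'default' (kept literal)
```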
```
@@ -0,0 +1,3 @@
{
    "kickstart_ver_str": "7.0(3)I5(3)"
}
```
```
@@ -0,0 +1,13 @@
{
    "TABLE_inv": {
        "ROW_inv": [
            {
                "name": "Chassis",
                "desc": "Nexus9000 C9504 (4 Slot) Chassis",
                "productid": "N9K-C9504",
                "vendorid": "V01",
                "serialnum": "BR-549"
            }
        ]
    }
}
```
```
@@ -0,0 +1,3 @@
{
    "vpc-domain-id": "100"
}
```
```
@@ -0,0 +1,36 @@
{
    "TABLE_vrf": {
        "ROW_vrf": [
            {
                "vrf_name": "my_vrf",
                "vrf_id": 4,
                "vrf_state": "Up",
                "vrf_reason": "--"
            },
            {
                "vrf_name": "default",
                "vrf_id": 1,
                "vrf_state": "Up",
                "vrf_reason": "--"
            },
            {
                "vrf_name": "management",
                "vrf_id": 2,
                "vrf_state": "Up",
                "vrf_reason": "--"
            },
            {
                "vrf_name": "test-vrf",
                "vrf_id": 3,
                "vrf_state": "Up",
                "vrf_reason": "--"
            },
            {
                "vrf_name": "obviously-different-vrf",
                "vrf_id": 4,
                "vrf_state": "Up",
                "vrf_reason": "--"
            }
        ]
    }
}
```
```
@@ -0,0 +1,2 @@
vpc domain 100
  peer-keepalive destination 192.168.1.1 source 10.1.1.1 vrf my_vrf
```
```
@@ -31,6 +31,9 @@ class TestNxosVpcModule(TestNxosModule):
    def setUp(self):
        super(TestNxosVpcModule, self).setUp()

        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vpc.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vpc.load_config')
        self.load_config = self.mock_load_config.start()
```
```
@@ -39,6 +42,7 @@ class TestNxosVpcModule(TestNxosModule):
    def tearDown(self):
        super(TestNxosVpcModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_run_commands.stop()
```
```
@@ -52,7 +56,19 @@ class TestNxosVpcModule(TestNxosModule):
                output.append(load_fixture('nxos_vpc', filename))
            return output

        def vrf_load_from_file(*args, **kwargs):
            """Load vpc output for vrf tests"""
            module, commands = args
            output = list()
            for command in commands:
                filename = 'vrf_test_' + str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('nxos_vpc', filename))
            return output

        self.load_config.return_value = None
        if device == '_vrf_test':
            self.run_commands.side_effect = vrf_load_from_file
        else:
            self.run_commands.side_effect = load_from_file

    def test_nxos_vpc_present(self):
```
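The `vrf_load_from_file` helper above maps each issued command to a fixture file name mechanically: everything before `' | '`, spaces replaced with underscores, prefixed with `vrf_test_`. The command string in this sketch is only an illustration of that mapping, not necessarily what the module sends.

```python
command = 'show vrf all | json'  # hypothetical command, for illustration only
filename = 'vrf_test_' + str(command).split(' | ')[0].replace(' ', '_')
print(filename)  # -> vrf_test_show_vrf_all
```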
```
@@ -64,3 +80,103 @@ class TestNxosVpcModule(TestNxosModule):
            'peer-keepalive destination 192.168.100.4 source 10.1.100.20',
            'peer-gateway', 'auto-recovery',
        ])

    def test_nxos_vpc_vrf_1(self):
        # No vrf -> vrf 'default'
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
            pkl_src='10.1.1.1',
            pkl_vrf='default',
        ))
        self.execute_module(changed=True, commands=[
            'vpc domain 100',
            'peer-keepalive destination 192.168.1.1 source 10.1.1.1 vrf default'
        ])

    def test_nxos_vpc_vrf_2(self):
        # vrf 'my_vrf' -> vrf 'test-vrf'
        # All pkl commands should be present
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
            pkl_src='10.1.1.1',
            pkl_vrf='test-vrf',
        ))
        self.execute_module(changed=True, device='_vrf_test', commands=[
            'vpc domain 100',
            'peer-keepalive destination 192.168.1.1 source 10.1.1.1 vrf test-vrf'
        ])

    def test_nxos_vpc_vrf_3(self):
        # vrf 'my_vrf' -> vrf 'obviously-different-vrf'
        # Existing pkl_src should be retained even though playbook does not specify it
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
            pkl_vrf='obviously-different-vrf'
        ))
        self.execute_module(changed=True, device='_vrf_test', commands=[
            'vpc domain 100',
            'peer-keepalive destination 192.168.1.1 source 10.1.1.1 vrf obviously-different-vrf'
        ])

    def test_nxos_vpc_vrf_4(self):
        # vrf 'my_vrf' -> vrf 'management'
        # 'management' is the default value for vrf, it will not nvgen
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
            pkl_vrf='management',
        ))
        self.execute_module(changed=True, device='_vrf_test', commands=[
            'vpc domain 100',
            'peer-keepalive destination 192.168.1.1 source 10.1.1.1 vrf management'
        ])

    def test_nxos_vpc_vrf_5(self):
        # vrf 'my_vrf' -> vrf 'my_vrf' (idempotence)
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
            pkl_src='10.1.1.1',
            pkl_vrf='my_vrf',
        ))
        self.execute_module(changed=False, device='_vrf_test')

    def test_nxos_vpc_vrf_6(self):
        # vrf 'my_vrf' -> absent tests
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            state='absent'
        ))
        self.execute_module(changed=True, device='_vrf_test', commands=[
            'terminal dont-ask',
            'no vpc domain 100',
        ])

    def test_nxos_vpc_vrf_7(self):
        # dest 192.168.1.1 source 10.1.1.1 vrf my_vrf -> (dest only) (idempotence)
        # pkl_src/pkl_vrf not in playbook but exists on device.
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
        ))
        self.execute_module(changed=False, device='_vrf_test')

    def test_nxos_vpc_vrf_8(self):
        # dest 192.168.1.1 source 10.1.1.1 vrf my_vrf -> (optional vrf) (idempotence)
        # pkl_src not in playbook but exists on device.
        self.get_config.return_value = load_fixture('nxos_vpc', 'vrf_test_vpc_config')
        set_module_args(dict(
            domain=100,
            pkl_dest='192.168.1.1',
            pkl_vrf='my_vrf',
        ))
        self.execute_module(changed=False, device='_vrf_test')
```