kubevirt: Add affinity parameters (#57359)
This commit is contained in:
parent f7de9cb39f
commit 886f4c66ca
3 changed files with 128 additions and 1 deletion
@@ -41,6 +41,9 @@ VM_SPEC_DEF_ARG_SPEC = {
     'cpu_limit': {'type': 'int'},
     'cpu_shares': {'type': 'int'},
     'cpu_features': {'type': 'list'},
+    'affinity': {'type': 'dict'},
+    'anti_affinity': {'type': 'dict'},
+    'node_affinity': {'type': 'dict'},
 }
 # And other common args go here:
 VM_COMMON_ARG_SPEC = {
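The arg spec above validates the three new parameters only as plain dicts; their inner structure is not enforced anywhere and can only be inferred from the parsing code in the hunks below. A sketch of that inferred shape, with illustrative placeholder values that are not part of the commit:

    # affinity and anti_affinity share one shape; topologyKey is a required
    # field for pod (anti-)affinity terms in the Kubernetes API.
    affinity:
      soft:
      - weight: 1                              # rendered as 'weight'
        topology_key: kubernetes.io/hostname   # rendered as 'topologyKey'
        term:
          match_expressions: []                # rendered as 'matchExpressions'
      hard:
      - topology_key: kubernetes.io/hostname
        term:
          match_expressions: []
    # node_affinity takes no topology_key; only soft entries carry a weight.
    node_affinity:
      soft:
      - weight: 1
        term:
          match_expressions: []
      hard:
      - term:
          match_expressions: []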
@@ -323,6 +326,9 @@ class KubeVirtRawModule(KubernetesRawModule):
         tablets = params.get('tablets')
         cpu_shares = params.get('cpu_shares')
         cpu_limit = params.get('cpu_limit')
+        node_affinity = params.get('node_affinity')
+        vm_affinity = params.get('affinity')
+        vm_anti_affinity = params.get('anti_affinity')
         template_spec = template['spec']

         # Merge additional flat parameters:
@@ -370,6 +376,48 @@ class KubeVirtRawModule(KubernetesRawModule):
         if headless is not None:
             template_spec['domain']['devices']['autoattachGraphicsDevice'] = not headless

+        if vm_affinity or vm_anti_affinity:
+            vms_affinity = vm_affinity or vm_anti_affinity
+            affinity_name = 'podAffinity' if vm_affinity else 'podAntiAffinity'
+            for affinity in vms_affinity.get('soft', []):
+                if not template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution']:
+                    template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'] = []
+                template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'].append({
+                    'weight': affinity.get('weight'),
+                    'podAffinityTerm': {
+                        'labelSelector': {
+                            'matchExpressions': affinity.get('term').get('match_expressions'),
+                        },
+                        'topologyKey': affinity.get('topology_key'),
+                    },
+                })
+            for affinity in vms_affinity.get('hard', []):
+                if not template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution']:
+                    template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'] = []
+                template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'].append({
+                    'labelSelector': {
+                        'matchExpressions': affinity.get('term').get('match_expressions'),
+                    },
+                    'topologyKey': affinity.get('topology_key'),
+                })
+
+        if node_affinity:
+            for affinity in node_affinity.get('soft', []):
+                if not template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution']:
+                    template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'] = []
+                template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'].append({
+                    'weight': affinity.get('weight'),
+                    'preference': {
+                        'matchExpressions': affinity.get('term').get('match_expressions'),
+                    }
+                })
+            for affinity in node_affinity.get('hard', []):
+                if not template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']:
+                    template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'] = []
+                template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'].append({
+                    'matchExpressions': affinity.get('term').get('match_expressions'),
+                })
+
         # Define disks
         self._define_disks(disks, template_spec, defaults)
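For orientation, a minimal sketch of what the loops above emit, using hypothetical input values that are not taken from the commit: a single soft anti_affinity entry such as

    anti_affinity:
      soft:
      - weight: 100
        topology_key: kubernetes.io/hostname
        term:
          match_expressions:
          - key: app
            operator: In
            values:
            - db

ends up in the VMI template spec as

    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - db
            topologyKey: kubernetes.io/hostname

Note that 'podAffinityTerm' is the correct field name for weighted terms under both podAffinity and podAntiAffinity in the Kubernetes API, and that the expression vm_affinity or vm_anti_affinity means affinity wins when both parameters are supplied.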
@@ -194,8 +194,17 @@ EXAMPLES = '''
             path: /disk/fedora.qcow2
         disk:
           bus: virtio
+    node_affinity:
+      soft:
+      - weight: 1
+        term:
+          match_expressions:
+          - key: security
+            operator: In
+            values:
+            - S2

-- name: Create virtual machine with datavolume
+- name: Create virtual machine with datavolume and specify node affinity
   kubevirt_vm:
     name: myvm
     namespace: default
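Assuming the module-side rendering shown in the earlier hunk, the soft rule in this example would become the following nodeAffinity stanza in the generated VMI (a sketch, not output captured from the module):

    affinity:
      nodeAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 1
          preference:
            matchExpressions:
            - key: security
              operator: In
              values:
              - S2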
@@ -209,6 +218,14 @@ EXAMPLES = '''
           accessModes:
             - ReadWriteOnce
           storage: 5Gi
+    node_affinity:
+      hard:
+      - term:
+          match_expressions:
+          - key: security
+            operator: In
+            values:
+            - S1

 - name: Remove virtual machine 'myvm'
   kubevirt_vm:
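The hard variant renders differently from the soft one: required node rules nest under nodeSelectorTerms rather than forming a flat weighted list. For this example the rendering loop would produce (again a sketch under the same assumptions):

    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: security
              operator: In
              values:
              - S1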
@@ -38,4 +38,66 @@ options:
       as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
       More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
     type: dict
+  affinity:
+    description:
+      - "Describes vm affinity scheduling rules, e.g. co-locate this vm in the same node, zone, etc. as some other vms."
+    type: dict
+    version_added: 2.9
+    suboptions:
+      soft:
+        description:
+          - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
+             node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
+             each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
+             a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which match the corresponding
+             C(term); the nodes with the highest sum are the most preferred."
+        type: dict
+      hard:
+        description:
+          - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
+             the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
+             system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
+             each C(term) are intersected, i.e. all terms must be satisfied."
+        type: dict
+  node_affinity:
+    description:
+      - "Describes node affinity scheduling rules for the vm."
+    type: dict
+    version_added: 2.9
+    suboptions:
+      soft:
+        description:
+          - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
+             a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
+             for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
+             compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
+             C(match_expressions); the nodes with the highest sum are the most preferred."
+        type: dict
+      hard:
+        description:
+          - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
+             the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
+             may or may not try to eventually evict the vm from its node."
+        type: dict
+  anti_affinity:
+    description:
+      - "Describes vm anti-affinity scheduling rules, e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
+    type: dict
+    version_added: 2.9
+    suboptions:
+      soft:
+        description:
+          - "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
+             choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
+             i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
+             etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which match
+             the corresponding C(term); the nodes with the highest sum are the most preferred."
+        type: dict
+      hard:
+        description:
+          - "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
+             If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
+             update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
+             corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
+        type: dict
 '''
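The commit documents affinity and anti_affinity but adds no playbook example for either; a hypothetical task matching the parameter shape the module parses (names and values are illustrative only, not part of the commit):

    - name: Create virtual machine kept away from other myvm instances
      kubevirt_vm:
        state: present
        name: myvm
        namespace: default
        memory: 128Mi
        anti_affinity:
          hard:
          - topology_key: kubernetes.io/hostname
            term:
              match_expressions:
              - key: app
                operator: In
                values:
                - myvm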