fix: adds affinity and other scheduling to the operator (#29977)
closes: #29258 Signed-off-by: Steve Hawkins <shawkins@redhat.com>
This commit is contained in:
parent
f010f7df9b
commit
a7ae90cbb6
7 changed files with 290 additions and 0 deletions
|
@ -22,3 +22,10 @@ WARNING: JBoss Marshalling and Infinispan Protostream are not compatible with ea
|
||||||
Consequently, all caches are cleared when upgrading to this version.
|
Consequently, all caches are cleared when upgrading to this version.
|
||||||
|
|
||||||
Infinispan Protostream is based on https://protobuf.dev/programming-guides/proto3/[Protocol Buffers] (proto 3), which has the advantage of backwards/forwards compatibility.
|
Infinispan Protostream is based on https://protobuf.dev/programming-guides/proto3/[Protocol Buffers] (proto 3), which has the advantage of backwards/forwards compatibility.
|
||||||
|
|
||||||
|
= Keycloak CR supports standard scheduling options
|
||||||
|
|
||||||
|
The Keycloak CR now exposes first-class properties for controlling the scheduling of your Keycloak Pods.
|
||||||
|
|
||||||
|
For more details, see the
|
||||||
|
https://www.keycloak.org/operator/advanced-configuration[Operator Advanced Configuration].
|
||||||
|
|
|
@ -21,6 +21,10 @@ This is an optimization to reduce traffic and network related resources.
|
||||||
In {project_name} 26, the new method has a default implementation to keep backward compatibility with custom implementation.
|
In {project_name} 26, the new method has a default implementation to keep backward compatibility with custom implementation.
|
||||||
The default implementation performs a single network call per an event, and it will be removed in a future version of {project_name}.
|
The default implementation performs a single network call per an event, and it will be removed in a future version of {project_name}.
|
||||||
|
|
||||||
|
= Operator scheduling defaults
|
||||||
|
|
||||||
|
Keycloak Pods will now have default affinities to prevent multiple instances from the same CR from being deployed on the same node, and all Pods from the same CR will prefer to be in the same zone to prevent stretch cache clusters.
|
||||||
|
|
||||||
= Operator's default CPU and memory limits/requests
|
= Operator's default CPU and memory limits/requests
|
||||||
|
|
||||||
In order to follow the best practices, the default CPU and memory limits/requests for the Operator were introduced. It affects both non-OLM and OLM installs. To override the default values for the OLM install, edit the `resources` section in the operator's https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/subscription-config.md#resources[subscription].
|
In order to follow the best practices, the default CPU and memory limits/requests for the Operator were introduced. It affects both non-OLM and OLM installs. To override the default values for the OLM install, edit the `resources` section in the operator's https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/subscription-config.md#resources[subscription].
|
||||||
|
|
|
@ -204,6 +204,48 @@ It is achieved by providing certain JVM options.
|
||||||
|
|
||||||
For more details, see <@links.server id="containers" />.
|
For more details, see <@links.server id="containers" />.
|
||||||
|
|
||||||
|
=== Scheduling
|
||||||
|
|
||||||
|
You may control several aspects of the server Pod scheduling via the Keycloak CR. The scheduling stanza exposes optional standard Kubernetes affinity, tolerations, topology spread constraints, and the priority class name to fine-tune the scheduling and placement of your server Pods.
|
||||||
|
|
||||||
|
An example utilizing all scheduling fields:
|
||||||
|
|
||||||
|
[source,yaml]
|
||||||
|
----
|
||||||
|
apiVersion: k8s.keycloak.org/v2alpha1
|
||||||
|
kind: Keycloak
|
||||||
|
metadata:
|
||||||
|
name: example-kc
|
||||||
|
spec:
|
||||||
|
scheduling:
|
||||||
|
priorityClassName: custom-high
|
||||||
|
affinity:
|
||||||
|
podAffinity:
|
||||||
|
preferredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
- podAffinityTerm:
|
||||||
|
labelSelector:
|
||||||
|
matchLabels:
|
||||||
|
app: keycloak
|
||||||
|
app.kubernetes.io/managed-by: keycloak-operator
|
||||||
|
app.kubernetes.io/component: server
|
||||||
|
topologyKey: topology.kubernetes.io/zone
|
||||||
|
weight: 10
|
||||||
|
tolerations:
|
||||||
|
- key: "some-taint"
|
||||||
|
operator: "Exists"
|
||||||
|
effect: "NoSchedule"
|
||||||
|
topologySpreadConstraints:
|
||||||
|
- maxSkew: 1
|
||||||
|
topologyKey: kubernetes.io/hostname
|
||||||
|
whenUnsatisfiable: DoNotSchedule
|
||||||
|
...
|
||||||
|
...
|
||||||
|
----
|
||||||
|
|
||||||
|
Please see https://kubernetes.io/docs/concepts/scheduling-eviction[the Kubernetes docs] for more on scheduling concepts.
|
||||||
|
|
||||||
|
If you do not specify a custom affinity, your Pods will have an affinity for the same zone and an anti-affinity for the same node to improve availability. Scheduling to the same zone if possible helps prevent stretch clusters where cross-zone cache cluster traffic may have too high a latency.
|
||||||
|
|
||||||
=== Management Interface
|
=== Management Interface
|
||||||
|
|
||||||
To change the port of the management interface, use the first-class citizen field `httpManagement.port` in the Keycloak CR.
|
To change the port of the management interface, use the first-class citizen field `httpManagement.port` in the Keycloak CR.
|
||||||
|
|
|
@ -23,6 +23,7 @@ import io.fabric8.kubernetes.api.model.EnvVarBuilder;
|
||||||
import io.fabric8.kubernetes.api.model.EnvVarSource;
|
import io.fabric8.kubernetes.api.model.EnvVarSource;
|
||||||
import io.fabric8.kubernetes.api.model.EnvVarSourceBuilder;
|
import io.fabric8.kubernetes.api.model.EnvVarSourceBuilder;
|
||||||
import io.fabric8.kubernetes.api.model.PodSpec;
|
import io.fabric8.kubernetes.api.model.PodSpec;
|
||||||
|
import io.fabric8.kubernetes.api.model.PodSpecFluent;
|
||||||
import io.fabric8.kubernetes.api.model.PodTemplateSpec;
|
import io.fabric8.kubernetes.api.model.PodTemplateSpec;
|
||||||
import io.fabric8.kubernetes.api.model.Secret;
|
import io.fabric8.kubernetes.api.model.Secret;
|
||||||
import io.fabric8.kubernetes.api.model.SecretKeySelector;
|
import io.fabric8.kubernetes.api.model.SecretKeySelector;
|
||||||
|
@ -45,6 +46,7 @@ import org.keycloak.operator.crds.v2alpha1.deployment.KeycloakSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.ValueOrSecret;
|
import org.keycloak.operator.crds.v2alpha1.deployment.ValueOrSecret;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.CacheSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.CacheSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.HttpManagementSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.HttpManagementSpec;
|
||||||
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.SchedulingSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.Truststore;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.Truststore;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.TruststoreSource;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.TruststoreSource;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.UnsupportedSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.UnsupportedSpec;
|
||||||
|
@ -77,6 +79,8 @@ public class KeycloakDeploymentDependentResource extends CRUDKubernetesDependent
|
||||||
|
|
||||||
private static final List<String> COPY_ENV = Arrays.asList("HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY");
|
private static final List<String> COPY_ENV = Arrays.asList("HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY");
|
||||||
|
|
||||||
|
private static final String ZONE_KEY = "topology.kubernetes.io/zone";
|
||||||
|
|
||||||
private static final String SERVICE_ACCOUNT_DIR = "/var/run/secrets/kubernetes.io/serviceaccount/";
|
private static final String SERVICE_ACCOUNT_DIR = "/var/run/secrets/kubernetes.io/serviceaccount/";
|
||||||
private static final String SERVICE_CA_CRT = SERVICE_ACCOUNT_DIR + "service-ca.crt";
|
private static final String SERVICE_CA_CRT = SERVICE_ACCOUNT_DIR + "service-ca.crt";
|
||||||
|
|
||||||
|
@ -227,6 +231,8 @@ public class KeycloakDeploymentDependentResource extends CRUDKubernetesDependent
|
||||||
|
|
||||||
private StatefulSet createBaseDeployment(Keycloak keycloakCR, Context<Keycloak> context) {
|
private StatefulSet createBaseDeployment(Keycloak keycloakCR, Context<Keycloak> context) {
|
||||||
Map<String, String> labels = Utils.allInstanceLabels(keycloakCR);
|
Map<String, String> labels = Utils.allInstanceLabels(keycloakCR);
|
||||||
|
labels.put("app.kubernetes.io/component", "server");
|
||||||
|
Map<String, String> schedulingLabels = new LinkedHashMap<>(labels);
|
||||||
if (operatorConfig.keycloak().podLabels() != null) {
|
if (operatorConfig.keycloak().podLabels() != null) {
|
||||||
labels.putAll(operatorConfig.keycloak().podLabels());
|
labels.putAll(operatorConfig.keycloak().podLabels());
|
||||||
}
|
}
|
||||||
|
@ -264,6 +270,7 @@ public class KeycloakDeploymentDependentResource extends CRUDKubernetesDependent
|
||||||
if (!specBuilder.hasDnsPolicy()) {
|
if (!specBuilder.hasDnsPolicy()) {
|
||||||
specBuilder.withDnsPolicy("ClusterFirst");
|
specBuilder.withDnsPolicy("ClusterFirst");
|
||||||
}
|
}
|
||||||
|
handleScheduling(keycloakCR, schedulingLabels, specBuilder);
|
||||||
|
|
||||||
// there isn't currently an editOrNewFirstContainer, so we need to do this manually
|
// there isn't currently an editOrNewFirstContainer, so we need to do this manually
|
||||||
var containerBuilder = specBuilder.buildContainers().isEmpty() ? specBuilder.addNewContainer() : specBuilder.editFirstContainer();
|
var containerBuilder = specBuilder.buildContainers().isEmpty() ? specBuilder.addNewContainer() : specBuilder.editFirstContainer();
|
||||||
|
@ -353,6 +360,42 @@ public class KeycloakDeploymentDependentResource extends CRUDKubernetesDependent
|
||||||
return baseDeployment;
|
return baseDeployment;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private void handleScheduling(Keycloak keycloakCR, Map<String, String> labels, PodSpecFluent<?> specBuilder) {
|
||||||
|
SchedulingSpec schedulingSpec = keycloakCR.getSpec().getSchedulingSpec();
|
||||||
|
if (schedulingSpec != null) {
|
||||||
|
if (!specBuilder.hasPriorityClassName()) {
|
||||||
|
specBuilder.withPriorityClassName(schedulingSpec.getPriorityClassName());
|
||||||
|
}
|
||||||
|
if (!specBuilder.hasAffinity()) {
|
||||||
|
specBuilder.withAffinity(schedulingSpec.getAffinity());
|
||||||
|
}
|
||||||
|
if (!specBuilder.hasTolerations()) {
|
||||||
|
specBuilder.withTolerations(schedulingSpec.getTolerations());
|
||||||
|
}
|
||||||
|
if (!specBuilder.hasTopologySpreadConstraints()) {
|
||||||
|
specBuilder.withTopologySpreadConstraints(schedulingSpec.getTopologySpreadConstraints());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// set defaults if nothing was specified by the user
|
||||||
|
// - server pods will have an affinity for the same zone as to avoid stretch clusters
|
||||||
|
// - server pods will have a stronger anti-affinity for the same node
|
||||||
|
|
||||||
|
if (!specBuilder.hasAffinity()) {
|
||||||
|
specBuilder.editOrNewAffinity().withNewPodAffinity().addNewPreferredDuringSchedulingIgnoredDuringExecution()
|
||||||
|
.withWeight(10).withNewPodAffinityTerm().withNewLabelSelector().withMatchLabels(labels)
|
||||||
|
.endLabelSelector().withTopologyKey(ZONE_KEY).endPodAffinityTerm()
|
||||||
|
.endPreferredDuringSchedulingIgnoredDuringExecution().endPodAffinity().endAffinity();
|
||||||
|
|
||||||
|
specBuilder.editOrNewAffinity().withNewPodAntiAffinity()
|
||||||
|
.addNewPreferredDuringSchedulingIgnoredDuringExecution().withWeight(50).withNewPodAffinityTerm()
|
||||||
|
.withNewLabelSelector().withMatchLabels(labels).endLabelSelector()
|
||||||
|
.withTopologyKey("kubernetes.io/hostname").endPodAffinityTerm()
|
||||||
|
.endPreferredDuringSchedulingIgnoredDuringExecution().endPodAntiAffinity().endAffinity();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
private static String getJGroupsParameter(Keycloak keycloakCR) {
|
private static String getJGroupsParameter(Keycloak keycloakCR) {
|
||||||
return JGROUPS_DNS_QUERY_PARAM + KeycloakDiscoveryServiceDependentResource.getName(keycloakCR) +"." + keycloakCR.getMetadata().getNamespace();
|
return JGROUPS_DNS_QUERY_PARAM + KeycloakDiscoveryServiceDependentResource.getName(keycloakCR) +"." + keycloakCR.getMetadata().getNamespace();
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,6 +28,7 @@ import org.keycloak.operator.crds.v2alpha1.deployment.spec.HttpManagementSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.HttpSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.HttpSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.IngressSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.IngressSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.ProxySpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.ProxySpec;
|
||||||
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.SchedulingSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.TransactionsSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.TransactionsSpec;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.Truststore;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.Truststore;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.UnsupportedSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.UnsupportedSpec;
|
||||||
|
@ -110,6 +111,10 @@ public class KeycloakSpec {
|
||||||
@JsonPropertyDescription("In this section you can configure Keycloak's management interface setting.")
|
@JsonPropertyDescription("In this section you can configure Keycloak's management interface setting.")
|
||||||
private HttpManagementSpec httpManagementSpec;
|
private HttpManagementSpec httpManagementSpec;
|
||||||
|
|
||||||
|
@JsonProperty("scheduling")
|
||||||
|
@JsonPropertyDescription("In this section you can configure Keycloak's scheduling")
|
||||||
|
private SchedulingSpec schedulingSpec;
|
||||||
|
|
||||||
public HttpSpec getHttpSpec() {
|
public HttpSpec getHttpSpec() {
|
||||||
return httpSpec;
|
return httpSpec;
|
||||||
}
|
}
|
||||||
|
@ -251,4 +256,12 @@ public class KeycloakSpec {
|
||||||
public void setProxySpec(ProxySpec proxySpec) {
|
public void setProxySpec(ProxySpec proxySpec) {
|
||||||
this.proxySpec = proxySpec;
|
this.proxySpec = proxySpec;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public SchedulingSpec getSchedulingSpec() {
|
||||||
|
return schedulingSpec;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setSchedulingSpec(SchedulingSpec schedulingSpec) {
|
||||||
|
this.schedulingSpec = schedulingSpec;
|
||||||
|
}
|
||||||
}
|
}
|
|
@ -0,0 +1,56 @@
|
||||||
|
package org.keycloak.operator.crds.v2alpha1.deployment.spec;
|
||||||
|
|
||||||
|
import io.fabric8.kubernetes.api.model.Affinity;
|
||||||
|
import io.fabric8.kubernetes.api.model.Toleration;
|
||||||
|
import io.fabric8.kubernetes.api.model.TopologySpreadConstraint;
|
||||||
|
import io.sundr.builder.annotations.Buildable;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||||
|
|
||||||
|
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||||
|
@Buildable(editableEnabled = false, builderPackage = "io.fabric8.kubernetes.api.builder")
|
||||||
|
public class SchedulingSpec {
|
||||||
|
|
||||||
|
private Affinity affinity;
|
||||||
|
@JsonInclude(JsonInclude.Include.NON_EMPTY)
|
||||||
|
private List<Toleration> tolerations = new ArrayList<Toleration>();
|
||||||
|
@JsonInclude(JsonInclude.Include.NON_EMPTY)
|
||||||
|
private List<TopologySpreadConstraint> topologySpreadConstraints = new ArrayList<TopologySpreadConstraint>();
|
||||||
|
private String priorityClassName;
|
||||||
|
|
||||||
|
public Affinity getAffinity() {
|
||||||
|
return affinity;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setAffinity(Affinity affinity) {
|
||||||
|
this.affinity = affinity;
|
||||||
|
}
|
||||||
|
|
||||||
|
public List<Toleration> getTolerations() {
|
||||||
|
return tolerations;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setTolerations(List<Toleration> tolerations) {
|
||||||
|
this.tolerations = tolerations;
|
||||||
|
}
|
||||||
|
|
||||||
|
public List<TopologySpreadConstraint> getTopologySpreadConstraints() {
|
||||||
|
return topologySpreadConstraints;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setTopologySpreadConstraints(List<TopologySpreadConstraint> topologySpreadConstraints) {
|
||||||
|
this.topologySpreadConstraints = topologySpreadConstraints;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getPriorityClassName() {
|
||||||
|
return priorityClassName;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setPriorityClassName(String priorityClassName) {
|
||||||
|
this.priorityClassName = priorityClassName;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -17,16 +17,22 @@
|
||||||
|
|
||||||
package org.keycloak.operator.testsuite.unit;
|
package org.keycloak.operator.testsuite.unit;
|
||||||
|
|
||||||
|
import io.fabric8.kubernetes.api.model.Affinity;
|
||||||
|
import io.fabric8.kubernetes.api.model.AffinityBuilder;
|
||||||
import io.fabric8.kubernetes.api.model.Container;
|
import io.fabric8.kubernetes.api.model.Container;
|
||||||
import io.fabric8.kubernetes.api.model.EnvVar;
|
import io.fabric8.kubernetes.api.model.EnvVar;
|
||||||
import io.fabric8.kubernetes.api.model.IntOrString;
|
import io.fabric8.kubernetes.api.model.IntOrString;
|
||||||
import io.fabric8.kubernetes.api.model.PodTemplateSpec;
|
import io.fabric8.kubernetes.api.model.PodTemplateSpec;
|
||||||
import io.fabric8.kubernetes.api.model.PodTemplateSpecBuilder;
|
import io.fabric8.kubernetes.api.model.PodTemplateSpecBuilder;
|
||||||
import io.fabric8.kubernetes.api.model.ProbeBuilder;
|
import io.fabric8.kubernetes.api.model.ProbeBuilder;
|
||||||
|
import io.fabric8.kubernetes.api.model.Toleration;
|
||||||
|
import io.fabric8.kubernetes.api.model.TopologySpreadConstraint;
|
||||||
|
import io.fabric8.kubernetes.api.model.TopologySpreadConstraintBuilder;
|
||||||
import io.fabric8.kubernetes.api.model.Volume;
|
import io.fabric8.kubernetes.api.model.Volume;
|
||||||
import io.fabric8.kubernetes.api.model.VolumeMount;
|
import io.fabric8.kubernetes.api.model.VolumeMount;
|
||||||
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
|
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
|
||||||
import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;
|
import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;
|
||||||
|
import io.fabric8.kubernetes.client.utils.Serialization;
|
||||||
import io.javaoperatorsdk.operator.api.reconciler.Context;
|
import io.javaoperatorsdk.operator.api.reconciler.Context;
|
||||||
import io.quarkus.test.InjectMock;
|
import io.quarkus.test.InjectMock;
|
||||||
import io.quarkus.test.junit.QuarkusTest;
|
import io.quarkus.test.junit.QuarkusTest;
|
||||||
|
@ -46,6 +52,7 @@ import org.keycloak.operator.crds.v2alpha1.deployment.spec.HttpSpecBuilder;
|
||||||
import org.keycloak.operator.crds.v2alpha1.deployment.spec.UnsupportedSpec;
|
import org.keycloak.operator.crds.v2alpha1.deployment.spec.UnsupportedSpec;
|
||||||
import org.mockito.Mockito;
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import java.util.Arrays;
|
||||||
import java.util.Optional;
|
import java.util.Optional;
|
||||||
import java.util.function.Consumer;
|
import java.util.function.Consumer;
|
||||||
import java.util.function.Function;
|
import java.util.function.Function;
|
||||||
|
@ -398,6 +405,32 @@ public class PodTemplateTest {
|
||||||
assertNotNull(startup);
|
assertNotNull(startup);
|
||||||
assertThat(startup.getPath()).isEqualTo("/health/started");
|
assertThat(startup.getPath()).isEqualTo("/health/started");
|
||||||
assertThat(startup.getPort().getIntVal()).isEqualTo(Constants.KEYCLOAK_MANAGEMENT_PORT);
|
assertThat(startup.getPort().getIntVal()).isEqualTo(Constants.KEYCLOAK_MANAGEMENT_PORT);
|
||||||
|
|
||||||
|
var affinity = podTemplate.getSpec().getAffinity();
|
||||||
|
assertNotNull(affinity);
|
||||||
|
assertThat(Serialization.asYaml(affinity)).isEqualTo("---\n"
|
||||||
|
+ "podAffinity:\n"
|
||||||
|
+ " preferredDuringSchedulingIgnoredDuringExecution:\n"
|
||||||
|
+ " - podAffinityTerm:\n"
|
||||||
|
+ " labelSelector:\n"
|
||||||
|
+ " matchLabels:\n"
|
||||||
|
+ " app: \"keycloak\"\n"
|
||||||
|
+ " app.kubernetes.io/managed-by: \"keycloak-operator\"\n"
|
||||||
|
+ " app.kubernetes.io/instance: \"instance\"\n"
|
||||||
|
+ " app.kubernetes.io/component: \"server\"\n"
|
||||||
|
+ " topologyKey: \"topology.kubernetes.io/zone\"\n"
|
||||||
|
+ " weight: 10\n"
|
||||||
|
+ "podAntiAffinity:\n"
|
||||||
|
+ " preferredDuringSchedulingIgnoredDuringExecution:\n"
|
||||||
|
+ " - podAffinityTerm:\n"
|
||||||
|
+ " labelSelector:\n"
|
||||||
|
+ " matchLabels:\n"
|
||||||
|
+ " app: \"keycloak\"\n"
|
||||||
|
+ " app.kubernetes.io/managed-by: \"keycloak-operator\"\n"
|
||||||
|
+ " app.kubernetes.io/instance: \"instance\"\n"
|
||||||
|
+ " app.kubernetes.io/component: \"server\"\n"
|
||||||
|
+ " topologyKey: \"kubernetes.io/hostname\"\n"
|
||||||
|
+ " weight: 50\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -484,4 +517,96 @@ public class PodTemplateTest {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testPriorityClass() {
|
||||||
|
// Arrange
|
||||||
|
PodTemplateSpec additionalPodTemplate = null;
|
||||||
|
|
||||||
|
// Act
|
||||||
|
var podTemplate = getDeployment(additionalPodTemplate, null,
|
||||||
|
s -> s.withNewSchedulingSpec().withPriorityClassName("important").endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getPriorityClassName()).isEqualTo("important");
|
||||||
|
|
||||||
|
podTemplate = getDeployment(new PodTemplateSpecBuilder().withNewSpec().withPriorityClassName("existing").endSpec().build(), null,
|
||||||
|
s -> s.withNewSchedulingSpec().withPriorityClassName("important").endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
assertThat(podTemplate.getSpec().getPriorityClassName()).isEqualTo("existing");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testTolerations() {
|
||||||
|
// Arrange
|
||||||
|
PodTemplateSpec additionalPodTemplate = null;
|
||||||
|
|
||||||
|
Toleration toleration = new Toleration("NoSchedule", "key", "=", null, "value");
|
||||||
|
|
||||||
|
// Act
|
||||||
|
var podTemplate = getDeployment(additionalPodTemplate, null,
|
||||||
|
s -> s.withNewSchedulingSpec().addToTolerations(toleration).endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getTolerations()).isEqualTo(Arrays.asList(toleration));
|
||||||
|
|
||||||
|
podTemplate = getDeployment(new PodTemplateSpecBuilder().withNewSpec().withTolerations(new Toleration()).endSpec().build(), null,
|
||||||
|
s -> s.withNewSchedulingSpec().addToTolerations(toleration).endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getTolerations()).isNotEqualTo(Arrays.asList(toleration));
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testTopologySpreadConstraints() {
|
||||||
|
// Arrange
|
||||||
|
PodTemplateSpec additionalPodTemplate = null;
|
||||||
|
|
||||||
|
TopologySpreadConstraint tsc = new TopologySpreadConstraintBuilder().withTopologyKey("key").build();
|
||||||
|
|
||||||
|
// Act
|
||||||
|
var podTemplate = getDeployment(additionalPodTemplate, null,
|
||||||
|
s -> s.withNewSchedulingSpec().addToTopologySpreadConstraints(tsc).endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getTopologySpreadConstraints()).isEqualTo(Arrays.asList(tsc));
|
||||||
|
|
||||||
|
podTemplate = getDeployment(new PodTemplateSpecBuilder().withNewSpec().withTopologySpreadConstraints(new TopologySpreadConstraint()).endSpec().build(), null,
|
||||||
|
s -> s.withNewSchedulingSpec().addToTopologySpreadConstraints(tsc).endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getTopologySpreadConstraints()).isNotEqualTo(Arrays.asList(tsc));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAffinity() {
|
||||||
|
// Arrange
|
||||||
|
PodTemplateSpec additionalPodTemplate = null;
|
||||||
|
|
||||||
|
var affinity = new AffinityBuilder().withNewPodAffinity()
|
||||||
|
.addNewPreferredDuringSchedulingIgnoredDuringExecution().withNewPodAffinityTerm().withNamespaces("x")
|
||||||
|
.endPodAffinityTerm().endPreferredDuringSchedulingIgnoredDuringExecution().endPodAffinity().build();
|
||||||
|
|
||||||
|
// Act
|
||||||
|
var podTemplate = getDeployment(additionalPodTemplate, null,
|
||||||
|
s -> s.withNewSchedulingSpec().withAffinity(affinity).endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getAffinity()).isEqualTo(affinity);
|
||||||
|
|
||||||
|
podTemplate = getDeployment(new PodTemplateSpecBuilder().withNewSpec().withAffinity(new Affinity()).endSpec().build(), null,
|
||||||
|
s -> s.withNewSchedulingSpec().withAffinity(affinity).endSchedulingSpec())
|
||||||
|
.getSpec().getTemplate();
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertThat(podTemplate.getSpec().getAffinity()).isNotEqualTo(affinity);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue