Avoid AvlPartitionFactory and use JdbmPartitionFactory for the embedded LDAP server to work around unstable tests.
The fix for #11171 turned out not to cover the root cause. This change also improves transaction handling in the LDAP Map storage. Closes #11211
parent bde2744650
commit a6dd9dc0f1
11 changed files with 75 additions and 92 deletions
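
The core of the workaround is the partition implementation used by the embedded ApacheDS server: it is selected through the "apacheds.partition.factory" system property, which LDAPEmbeddedServer sets before the directory service is created (see the last hunk below). A minimal sketch of that selection step, with an illustrative bootstrap class that is not part of this commit:

    // Illustrative only: the property name and the factory class come from this commit,
    // the surrounding bootstrap class is a hypothetical stand-in for the code that
    // starts the embedded server.
    public class EmbeddedLdapBootstrapSketch {
        public static void main(String[] args) {
            // point ApacheDS at the JDBM-backed factory instead of the default
            // in-memory AVL partition, which proved unstable in the tests
            System.setProperty("apacheds.partition.factory",
                    "org.keycloak.util.ldap.JdbmPartitionFactoryFast");
            // ... create and start the embedded directory service afterwards,
            // so that its factory picks up the property ...
        }
    }
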
@@ -39,6 +39,7 @@ import org.keycloak.models.map.storage.ldap.model.LdapMapObject;
 import org.keycloak.models.map.storage.ldap.model.LdapMapQuery;
 import org.keycloak.models.map.storage.ldap.role.config.LdapMapRoleMapperConfig;
 import org.keycloak.models.map.storage.ldap.role.entity.LdapRoleEntity;
+import org.keycloak.provider.Provider;

 import javax.naming.NamingException;
 import java.util.Arrays;
@@ -53,9 +54,8 @@ import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;

-public class LdapRoleMapKeycloakTransaction extends LdapMapKeycloakTransaction<LdapMapRoleEntityFieldDelegate, MapRoleEntity, RoleModel> {
+public class LdapRoleMapKeycloakTransaction extends LdapMapKeycloakTransaction<LdapMapRoleEntityFieldDelegate, MapRoleEntity, RoleModel> implements Provider {

-    private final KeycloakSession session;
     private final StringKeyConverter<String> keyConverter = new StringKeyConverter.StringKey();
     private final Set<String> deletedKeys = new HashSet<>();
     private final LdapMapRoleMapperConfig roleMapperConfig;
@@ -63,10 +63,10 @@ public class LdapRoleMapKeycloakTransaction extends LdapMapKeycloakTransaction<L
     private final LdapMapIdentityStore identityStore;

     public LdapRoleMapKeycloakTransaction(KeycloakSession session, Config.Scope config) {
-        this.session = session;
         this.roleMapperConfig = new LdapMapRoleMapperConfig(config);
         this.ldapMapConfig = new LdapMapConfig(config);
         this.identityStore = new LdapMapIdentityStore(session, ldapMapConfig);
+        session.enlistForClose(this);
     }

     // interface matching the constructor of this class
@@ -193,13 +193,18 @@ public class LdapRoleMapKeycloakTransaction extends LdapMapKeycloakTransaction<L
         if (read == null) {
             throw new ModelException("unable to read entity with key " + key);
         }
-        deletedKeys.add(key);
-        tasksOnCommit.add(new DeleteOperation() {
-            @Override
-            public void execute() {
-                identityStore.remove(read.getLdapMapObject());
-            }
-        });
+        if (!deletedKeys.contains((key))) {
+            // avoid enlisting LDAP removal twice if client calls it twice
+            deletedKeys.add(key);
+            tasksOnCommit.add(new DeleteOperation() {
+                @Override
+                public void execute() {
+                    identityStore.remove(read.getLdapMapObject());
+                    // once removed from LDAP, avoid updating a modified entity in LDAP.
+                    entities.remove(read.getId());
+                }
+            });
+        }
         return true;
     }

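
The rewritten delete path above makes the removal idempotent: only the first delete call for a key enlists an LDAP removal, and the pending in-memory entity is dropped inside the delete operation so that a later update cannot touch an entry that no longer exists. A stripped-down illustration of the guard, using plain Java collections rather than Keycloak's types:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class IdempotentDeleteSketch {
        private final Set<String> deletedKeys = new HashSet<>();
        private final List<Runnable> tasksOnCommit = new ArrayList<>();

        boolean delete(String key, Runnable ldapRemove) {
            if (!deletedKeys.contains(key)) {     // enlist the LDAP removal only once
                deletedKeys.add(key);
                tasksOnCommit.add(ldapRemove);
            }
            return true;
        }

        void commit() {
            tasksOnCommit.forEach(Runnable::run); // a duplicate removal would fail against LDAP here
            tasksOnCommit.clear();
        }
    }
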
@@ -392,8 +397,11 @@ public class LdapRoleMapKeycloakTransaction extends LdapMapKeycloakTransaction<L
                 identityStore.update(entity.getLdapMapObject());
             }
         });
-        identityStore.close();
+        // once the commit is complete, clear the local storage to avoid problems when rollback() is called later
+        // due to a different transaction failing.
+        tasksOnCommit.clear();
+        entities.clear();
+        tasksOnRollback.clear();
     }

     @Override
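
The comment added in this hunk states the reasoning: rollback() can still be called on this transaction after it has committed, namely when a different transaction enlisted in the same session fails later. Clearing the queued tasks and cached entities at the end of commit() turns such a late rollback into a no-op instead of replaying stale operations. A simplified illustration of that lifecycle, not Keycloak's actual transaction API:

    import java.util.ArrayList;
    import java.util.List;

    class ClearOnCommitSketch {
        private final List<Runnable> tasksOnCommit = new ArrayList<>();
        private final List<Runnable> tasksOnRollback = new ArrayList<>();

        void commit() {
            tasksOnCommit.forEach(Runnable::run);
            // after a successful commit, forget all local state ...
            tasksOnCommit.clear();
            tasksOnRollback.clear();
        }

        void rollback() {
            // ... so that a rollback triggered by another failing transaction
            // finds nothing left to undo here
            tasksOnRollback.forEach(Runnable::run);
            tasksOnRollback.clear();
            tasksOnCommit.clear();
        }
    }
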
@@ -409,4 +417,9 @@ public class LdapRoleMapKeycloakTransaction extends LdapMapKeycloakTransaction<L
         return new LdapRoleModelCriteriaBuilder(roleMapperConfig);
     }

+    @Override
+    public void close() {
+        identityStore.close();
+    }
+
 }
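
Together with the session.enlistForClose(this) call added in the constructor above, this close() method completes the Provider pattern: the session, not commit(), is now responsible for closing the LDAP identity store when it ends. A small sketch of that pattern, with a hypothetical resource type standing in for LdapMapIdentityStore:

    import org.keycloak.models.KeycloakSession;
    import org.keycloak.provider.Provider;

    class SessionScopedResourceSketch implements Provider {
        // hypothetical stand-in for a connection-holding store such as LdapMapIdentityStore
        interface CloseableStore { void close(); }

        private final CloseableStore store;

        SessionScopedResourceSketch(KeycloakSession session, CloseableStore store) {
            this.store = store;
            session.enlistForClose(this); // the session calls close() when it is closed
        }

        @Override
        public void close() {
            store.close();
        }
    }
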
@@ -74,13 +74,7 @@ public class TestLDAPResource {
         MultivaluedHashMap<String, String> ldapConfig = toComponentConfig(ldapCfg);
         ldapConfig.putSingle(LDAPConstants.SYNC_REGISTRATIONS, "true");
         ldapConfig.putSingle(LDAPConstants.EDIT_MODE, UserStorageProvider.EditMode.WRITABLE.toString());
-        // ApacheDS has a problem when processing an unbind request just before closing the connection, it will print
-        // "ignoring the message ... received from null session" and drop the message. To work around this:
-        // (1) enable connection pooling, to avoid short-lived connections
         ldapConfig.putSingle(LDAPConstants.CONNECTION_POOLING, "true");
-        // (2) set pref size to max size so that there are no connections that are opened and then closed immediately again
-        ldapConfig.putSingle(LDAPConstants.CONNECTION_POOLING_PREFSIZE, "100");
-        ldapConfig.putSingle(LDAPConstants.CONNECTION_POOLING_MAXSIZE, "100");
         UserStorageProviderModel model = new UserStorageProviderModel();
         model.setLastSync(0);
         model.setChangedSyncPeriod(-1);
@@ -92,13 +92,7 @@ public class LDAPRule extends ExternalResource {
     public Statement apply(Statement base, Description description) {
         // Default bind credential value
         defaultProperties.setProperty(LDAPConstants.BIND_CREDENTIAL, "secret");
-        // ApacheDS has a problem when processing an unbind request just before closing the connection, it will print
-        // "ignoring the message ... received from null session" and drop the message. To work around this:
-        // (1) enable connection pooling, to avoid short-lived connections
         defaultProperties.setProperty(LDAPConstants.CONNECTION_POOLING, "true");
-        // (2) set pref size to max size so that there are no connections that are opened and then closed immediately again
-        defaultProperties.setProperty(LDAPConstants.CONNECTION_POOLING_MAXSIZE, "100");
-        defaultProperties.setProperty(LDAPConstants.CONNECTION_POOLING_PREFSIZE, "100");
         // Default values of the authentication / access control method and connection encryption to use on the embedded
         // LDAP server upon start if not (re)set later via the LDAPConnectionParameters annotation directly on the test
         defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ACCESS_CONTROL, "true");
@@ -81,11 +81,6 @@ public class LDAPTestConfiguration {
         DEFAULT_VALUES.put(LDAPConstants.BIND_CREDENTIAL, "secret");
         DEFAULT_VALUES.put(LDAPConstants.VENDOR, LDAPConstants.VENDOR_OTHER);
         DEFAULT_VALUES.put(LDAPConstants.CONNECTION_POOLING, "true");
-        // ApacheDS has a problem when processing an unbind request just before closing the connection, it will print
-        // "ignoring the message ... received from null session" and drop the message. To work around this:
-        // set pref size to max size so that there are no connections that are opened and then closed immediately again
-        DEFAULT_VALUES.put(LDAPConstants.CONNECTION_POOLING_MAXSIZE, "100");
-        DEFAULT_VALUES.put(LDAPConstants.CONNECTION_POOLING_PREFSIZE, "100");
         DEFAULT_VALUES.put(LDAPConstants.PAGINATION, "true");
         DEFAULT_VALUES.put(LDAPConstants.BATCH_SIZE_FOR_SYNC, String.valueOf(LDAPConstants.DEFAULT_BATCH_SIZE_FOR_SYNC));
         DEFAULT_VALUES.put(LDAPConstants.USERNAME_LDAP_ATTRIBUTE, null);
@@ -87,13 +87,7 @@ public class LdapMapStorage extends KeycloakModelParameters {
                 .config("role.attributes", "ou")
                 .config("mode", "LDAP_ONLY")
                 .config("use.realm.roles.mapping", "true")
-                // ApacheDS has a problem when processing an unbind request just before closing the connection, it will print
-                // "ignoring the message ... received from null session" and drop the message. To work around this:
-                // (1) enable connection pooling, to avoid short-lived connections
-                .config(LDAPConstants.CONNECTION_POOLING, "true")
-                // (2) set pref size to max size so that there are no connections that are opened and then closed immediately again
-                .config(LDAPConstants.CONNECTION_POOLING_PREFSIZE, "1000")
-                .config(LDAPConstants.CONNECTION_POOLING_MAXSIZE, "1000");
+                .config(LDAPConstants.CONNECTION_POOLING, "true");

         cf.spi("client").config("map.storage.provider", ConcurrentHashMapStorageProviderFactory.PROVIDER_ID)
                 .spi("clientScope").config("map.storage.provider", ConcurrentHashMapStorageProviderFactory.PROVIDER_ID)
@@ -120,45 +114,4 @@ public class LdapMapStorage extends KeycloakModelParameters {
         return ldapRule.apply(base, description);
     }

-    @Override
-    public Statement instanceRule(Statement base, Description description) {
-
-        /* test execution might fail due to random errors rooted in ApacheDS, sometimes entites can't be removed,
-        also a follow-up test might fail when an entity already exists from a previous test. Therefore, retry in case of LDAP errors
-        or suspected LDAP errors. Rate of failures is about 1 in 150 attempts.
-        */
-        return new Statement() {
-            @Override
-            public void evaluate() throws Throwable {
-                int currentAttempt = 0;
-                int maxAttempts = 10;
-                while (true) {
-                    try {
-                        base.evaluate();
-                        return;
-                    } catch (Throwable t) {
-                        boolean shouldRetry = false;
-                        Throwable t2 = t;
-                        while(t2 != null) {
-                            if ((t2 instanceof ModelException && t2.getMessage() != null && t2.getMessage().startsWith("Could not unbind DN")
-                                    && t.getCause() instanceof NamingException) ||
-                                    t2 instanceof ModelDuplicateException) {
-                                shouldRetry = true;
-                                break;
-                            }
-                            t2 = t2.getCause();
-                        }
-                        if (!shouldRetry || currentAttempt > maxAttempts) {
-                            throw t;
-                        }
-                        LOG.warn("retrying after exception", t);
-                        // reset LDAP so that is is really cleaned up and no previous elements remain
-                        ldapRule.after();
-                        ldapRule.before();
-                        ++ currentAttempt;
-                    }
-                }
-            }
-        };
-    }
 }
@@ -73,13 +73,7 @@ public class LdapUserStorage extends KeycloakModelParameters {
         }
         config.putSingle(LDAPConstants.SYNC_REGISTRATIONS, "true");
         config.putSingle(LDAPConstants.EDIT_MODE, UserStorageProvider.EditMode.WRITABLE.toString());
-        // ApacheDS has a problem when processing an unbind request just before closing the connection, it will print
-        // "ignoring the message ... received from null session" and drop the message. To work around this:
-        // (1) enable connection pooling, to avoid short-lived connections
         config.putSingle(LDAPConstants.CONNECTION_POOLING, "true");
-        // (2) set pref size to max size so that there are no connections that are opened and then closed immediately again
-        config.putSingle(LDAPConstants.CONNECTION_POOLING_PREFSIZE, "1000");
-        config.putSingle(LDAPConstants.CONNECTION_POOLING_MAXSIZE, "1000");

         UserStorageProviderModel federatedStorage = new UserStorageProviderModel();
         federatedStorage.setName(LDAPStorageProviderFactory.PROVIDER_NAME + ":" + counter.getAndIncrement());
@@ -1,9 +1,7 @@
 package org.keycloak.testsuite.model.role;

 import org.hamcrest.Matcher;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.TestRule;
 import org.keycloak.models.ClientModel;
 import org.keycloak.models.ClientProvider;
 import org.keycloak.models.Constants;
@@ -60,6 +60,9 @@ log4j.logger.org.infinispan.server.hotrod=info
 log4j.logger.org.infinispan.client.hotrod.impl=info
 log4j.logger.org.infinispan.client.hotrod.event.impl=info

+# avoid logging INFO-message "ignoring the message MessageType : UNBIND_REQUEST" very often
+log4j.logger.org.apache.directory.server.ldap.handlers.LdapRequestHandler=warn
+
 log4j.logger.org.keycloak.executors=info

 #log4j.logger.org.infinispan.expiration.impl.ClusterExpirationManager=trace
@@ -137,12 +137,7 @@
             "role.attributes": "ou",
             "mode": "LDAP_ONLY",
             "use.realm.roles.mapping": "true",
-            "//connectionPooling": "ApacheDS has a problem when processing an unbind request just before closing the connection, therefore enable connection pooling to avoid short-lived connections",
-            "connectionPooling": "true",
-            "//connectionPoolingPrefSize": "ApacheDS has a problem when processing an unbind request just before closing the connection, therefore set max=preferred to avoid short-lived connections",
-            "connectionPoolingPrefSize": "1000",
-            "//connectionPoolingMaxSize": "ApacheDS has a problem when processing an unbind request just before closing the connection, therefore set max=preferred to avoid short-lived connections",
-            "connectionPoolingMaxSize": "1000"
+            "connectionPooling": "true"
         }
     },

@@ -0,0 +1,38 @@
+/*
+ * Copyright 2022. Red Hat, Inc. and/or its affiliates
+ * and other contributors as indicated by the @author tags.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.keycloak.util.ldap;
+
+import org.apache.directory.api.ldap.model.schema.SchemaManager;
+import org.apache.directory.server.core.api.DnFactory;
+import org.apache.directory.server.core.factory.JdbmPartitionFactory;
+import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
+
+import java.io.File;
+
+/**
+ * @author Alexander Schwartz
+ */
+public class JdbmPartitionFactoryFast extends JdbmPartitionFactory {
+    @Override
+    public JdbmPartition createPartition(SchemaManager schemaManager, DnFactory dnFactory, String id, String suffix, int cacheSize, File workingDirectory) throws Exception {
+        JdbmPartition partition = super.createPartition(schemaManager, dnFactory, id, suffix, cacheSize, workingDirectory);
+        // don't write to disk on every update, thereby slightly speeding up the tests
+        partition.setSyncOnWrite(false);
+        return partition;
+    }
+}
@@ -207,7 +207,13 @@ public class LDAPEmbeddedServer {
         dcName = dcName.substring(dcName.indexOf("=") + 1);

         if (this.directoryServiceFactory.equals(DSF_INMEMORY)) {
-            System.setProperty( "apacheds.partition.factory", AvlPartitionFactory.class.getName());
+            // this used to be AvlPartitionFactory but it didn't prove stable in testing;
+            // sometimes the search returned an OPERATIONS_ERROR, sometimes after retrieving a list of entries
+            // and deleting them one by one, an entry was missing before it was deleted and either the search or the deletion failed.
+            // This happened in approximately one out of 100 test runs for RoleModelTest.
+            // This all happened with ApacheDS 2.0.0.AM26. Once changed to JdbmPartitionFactory, the problems disappeared.
+            // https://issues.apache.org/jira/browse/DIRSERVER-2369
+            System.setProperty( "apacheds.partition.factory", JdbmPartitionFactoryFast.class.getName());
         } else if (this.directoryServiceFactory.equals(DSF_FILE)) {
             System.setProperty( "apacheds.partition.factory", JdbmPartitionFactory.class.getName());
         } else {