KEYCLOAK-2412 wildfly configs and polishing

mposolda 2016-02-17 13:02:47 +01:00
parent f52f998bcd
commit e237bcd383
12 changed files with 41 additions and 21 deletions

View file

@@ -89,6 +89,7 @@
<local-cache name="sessions"/>
<local-cache name="offlineSessions"/>
<local-cache name="loginFailures"/>
<local-cache name="work"/>
<local-cache name="realmVersions">
<transaction mode="BATCH" locking="PESSIMISTIC"/>
</local-cache>
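
For context on how the new work cache is consumed, here is a minimal Java sketch based on the InfinispanConnectionProvider usages visible in the Java hunks further down; resolving the provider from a KeycloakSession is assumed here and is not part of this commit.

import java.io.Serializable;

import org.infinispan.Cache;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSession;

class WorkCacheLookupSketch {
    // Resolves the "work" cache declared in the Infinispan config above.
    Cache<String, Serializable> workCache(KeycloakSession session) {
        InfinispanConnectionProvider infinispan = session.getProvider(InfinispanConnectionProvider.class);
        return infinispan.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
    }
}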

View file

@@ -7,5 +7,8 @@ embed-server --server-config=standalone-ha.xml
/subsystem=infinispan/cache-container=keycloak/distributed-cache=sessions:add(mode="SYNC",owners="1")
/subsystem=infinispan/cache-container=keycloak/distributed-cache=offlineSessions:add(mode="SYNC",owners="1")
/subsystem=infinispan/cache-container=keycloak/distributed-cache=loginFailures:add(mode="SYNC",owners="1")
/subsystem=infinispan/cache-container=keycloak/replicated-cache=work:add(mode="SYNC")
/subsystem=infinispan/cache-container=keycloak/local-cache=realmVersions:add()
/subsystem=infinispan/cache-container=keycloak/local-cache=realmVersions/transaction=TRANSACTION:add(mode=BATCH,locking=PESSIMISTIC)
/extension=org.keycloak.keycloak-server-subsystem/:add(module=org.keycloak.keycloak-server-subsystem)
/subsystem=keycloak-server:add(web-context=auth)

View file

@@ -6,5 +6,8 @@ embed-server --server-config=standalone.xml
/subsystem=infinispan/cache-container=keycloak/local-cache=sessions:add()
/subsystem=infinispan/cache-container=keycloak/local-cache=offlineSessions:add()
/subsystem=infinispan/cache-container=keycloak/local-cache=loginFailures:add()
/subsystem=infinispan/cache-container=keycloak/local-cache=work:add()
/subsystem=infinispan/cache-container=keycloak/local-cache=realmVersions:add()
/subsystem=infinispan/cache-container=keycloak/local-cache=realmVersions/transaction=TRANSACTION:add(mode=BATCH,locking=PESSIMISTIC)
/extension=org.keycloak.keycloak-server-subsystem/:add(module=org.keycloak.keycloak-server-subsystem)
/subsystem=keycloak-server:add(web-context=auth)

View file

@@ -34,7 +34,6 @@ import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSession;
/**
* Various utils related to clustering
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@@ -67,7 +66,7 @@ public class InfinispanClusterProvider implements ClusterProvider {
existingClusterStartTime = (Integer) cache.putIfAbsent(InfinispanClusterProvider.CLUSTER_STARTUP_TIME_KEY, serverStartTime);
if (existingClusterStartTime == null) {
logger.infof("Initialized cluster startup time to %s", Time.toDate(serverStartTime).toString());
logger.debugf("Initialized cluster startup time to %s", Time.toDate(serverStartTime).toString());
return serverStartTime;
} else {
return existingClusterStartTime;
@@ -140,19 +139,26 @@ public class InfinispanClusterProvider implements ClusterProvider {
int thatTime = existingLock.getTimestamp();
int currentTime = Time.currentTime();
if (thatTime + taskTimeoutInSeconds < currentTime) {
logger.infof("Task %s outdated when in progress by node %s. Will try to replace task with our node %s", cacheKey, existingLock.getNode(), myLock.getNode());
if (logger.isTraceEnabled()) {
logger.tracef("Task %s outdated when in progress by node %s. Will try to replace task with our node %s", cacheKey, existingLock.getNode(), myLock.getNode());
}
boolean replaced = cache.replace(cacheKey, existingLock, myLock);
// TODO: trace
if (!replaced) {
logger.infof("Failed to replace the task %s. Other thread replaced in the meantime. Ignoring task.", cacheKey);
if (logger.isTraceEnabled()) {
logger.tracef("Failed to replace the task %s. Other thread replaced in the meantime. Ignoring task.", cacheKey);
}
}
return replaced;
} else {
logger.infof("Task %s in progress already by node %s. Ignoring task.", cacheKey, existingLock.getNode());
if (logger.isTraceEnabled()) {
logger.tracef("Task %s in progress already by node %s. Ignoring task.", cacheKey, existingLock.getNode());
}
return false;
}
} else {
logger.infof("Successfully acquired lock for task %s. Our node is %s", cacheKey, myLock.getNode());
if (logger.isTraceEnabled()) {
logger.tracef("Successfully acquired lock for task %s. Our node is %s", cacheKey, myLock.getNode());
}
return true;
}
}
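
The hunk above takes over stale task locks with an optimistic compare-and-swap on the work cache. A stripped-down sketch of that pattern follows; LockEntry and its fields are hypothetical stand-ins for the real lock entry, which this diff does not show in full.

import java.io.Serializable;

import org.infinispan.Cache;

class LockTakeoverSketch {

    // Hypothetical stand-in for the real lock entry; it must implement equals() so that
    // the conditional replace below compares by value.
    static class LockEntry implements Serializable {
        final String node;
        final int timestamp;
        LockEntry(String node, int timestamp) { this.node = node; this.timestamp = timestamp; }
        @Override public boolean equals(Object o) {
            return o instanceof LockEntry && node.equals(((LockEntry) o).node) && timestamp == ((LockEntry) o).timestamp;
        }
        @Override public int hashCode() { return node.hashCode() + timestamp; }
    }

    // Returns true if this node ends up owning the lock for cacheKey.
    boolean tryLock(Cache<String, Serializable> cache, String cacheKey, LockEntry myLock, int taskTimeoutInSeconds) {
        LockEntry existing = (LockEntry) cache.putIfAbsent(cacheKey, myLock);
        if (existing == null) {
            return true;                                              // lock was free
        }
        int currentTime = (int) (System.currentTimeMillis() / 1000);
        if (existing.timestamp + taskTimeoutInSeconds < currentTime) {
            // Stale lock: swap it atomically; fails if another node replaced it first.
            return cache.replace(cacheKey, existing, myLock);
        }
        return false;                                                 // lock is held and still fresh
    }
}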
@@ -166,7 +172,9 @@ public class InfinispanClusterProvider implements ClusterProvider {
cache.getAdvancedCache()
.withFlags(Flag.IGNORE_RETURN_VALUES, Flag.FORCE_SYNCHRONOUS)
.remove(cacheKey);
logger.infof("Task %s removed from the cache", cacheKey);
if (logger.isTraceEnabled()) {
logger.tracef("Task %s removed from the cache", cacheKey);
}
return;
} catch (RuntimeException e) {
ComponentStatus status = cache.getStatus();

View file

@@ -119,7 +119,7 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory
return;
}
logger.infof("Nodes %s removed from cluster. Removing tasks locked by this nodes", removedNodesAddresses.toString());
logger.debugf("Nodes %s removed from cluster. Removing tasks locked by this nodes", removedNodesAddresses.toString());
Cache<String, Serializable> cache = cacheManager.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
@@ -146,7 +146,9 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory
while (toRemove.hasNext()) {
String rem = toRemove.next();
logger.infof("Removing task %s due it's node left cluster", rem);
if (logger.isTraceEnabled()) {
logger.tracef("Removing task %s due it's node left cluster", rem);
}
cache.remove(rem);
}
}

View file

@@ -40,7 +40,7 @@ import org.keycloak.models.utils.KeycloakModelUtils;
* Startup initialization for reading persistent userSessions/clientSessions to be filled into infinispan/memory . In cluster,
* the initialization is distributed among all cluster nodes, so the startup time is even faster
*
* TODO: Move to clusterService. Implementation is already pretty generic and doesn't contain any "userSession" specific stuff. All logic is in the SessionLoader implementation
* TODO: Move to clusterService. Implementation is already pretty generic and doesn't contain any "userSession" specific stuff. All sessions-specific logic is in the SessionLoader implementation
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
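
To illustrate the split the TODO above describes, here is a purely hypothetical sketch of how the sessions-specific work could sit behind a small loader interface while the distribution logic stays generic; the names and signatures are illustrative only and are not the actual SessionLoader API.

import org.keycloak.models.KeycloakSession;

// Illustrative only: a generic initializer would iterate segments and hand each one to the
// loader, so nothing session-specific remains in the initializer itself.
interface SegmentLoaderSketch {
    int getSegmentsCount(KeycloakSession session);              // how many chunks of work exist
    boolean loadSegment(KeycloakSession session, int segment);  // load one chunk; true on success
}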

View file

@@ -17,8 +17,6 @@
package org.keycloak.cluster;
import java.io.Serializable;
/**
* Task to be executed on all cluster nodes once it's notified.
*

View file

@@ -23,12 +23,14 @@ import java.util.concurrent.Callable;
import org.keycloak.provider.Provider;
/**
* Various utils related to clustering and concurrent tasks on cluster nodes
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public interface ClusterProvider extends Provider {
/**
* Will use startup time of this server in non-cluster environment. Otherwise the value is same for all cluster nodes
* Same value for all cluster nodes. It will use startup time of this server in non-cluster environment.
*/
int getClusterStartupTime();
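
A minimal caller-side sketch of the contract documented above; obtaining the provider through the KeycloakSession mirrors the provider lookups elsewhere in this diff, but that lookup is an assumption and not part of this hunk.

import org.keycloak.cluster.ClusterProvider;
import org.keycloak.models.KeycloakSession;

class ClusterStartupTimeSketch {
    // Same value on every node of a cluster; standalone it is this server's startup time.
    int clusterStartupTime(KeycloakSession session) {
        ClusterProvider cluster = session.getProvider(ClusterProvider.class);
        return cluster.getClusterStartupTime();
    }
}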

View file

@@ -106,7 +106,7 @@ public class UsersSyncManager {
});
if (holder.result == null || !holder.result.isExecuted()) {
logger.infof("syncAllUsers for federation provider %s was ignored as it's already in progress", fedProvider.getDisplayName());
logger.debugf("syncAllUsers for federation provider %s was ignored as it's already in progress", fedProvider.getDisplayName());
return UserFederationSyncResult.ignored();
} else {
return holder.result.getResult();
@@ -145,7 +145,7 @@
});
if (holder.result == null || !holder.result.isExecuted()) {
logger.infof("syncChangedUsers for federation provider %s was ignored as it's already in progress", fedProvider.getDisplayName());
logger.debugf("syncChangedUsers for federation provider %s was ignored as it's already in progress", fedProvider.getDisplayName());
return UserFederationSyncResult.ignored();
} else {
return holder.result.getResult();
@@ -162,7 +162,7 @@
// Executed once it receives notification that some UserFederationProvider was created or updated
protected void refreshPeriodicSyncForProvider(final KeycloakSessionFactory sessionFactory, TimerProvider timer, final UserFederationProviderModel fedProvider, final String realmId) {
logger.infof("Going to refresh periodic sync for provider '%s' . Full sync period: %d , changed users sync period: %d",
logger.debugf("Going to refresh periodic sync for provider '%s' . Full sync period: %d , changed users sync period: %d",
fedProvider.getDisplayName(), fedProvider.getFullSyncPeriod(), fedProvider.getChangedSyncPeriod());
if (fedProvider.getFullSyncPeriod() > 0) {
@@ -176,7 +176,7 @@
if (shouldPerformSync) {
syncAllUsers(sessionFactory, realmId, fedProvider);
} else {
logger.infof("Ignored periodic full sync with federation provider %s due small time since last sync", fedProvider.getDisplayName());
logger.debugf("Ignored periodic full sync with federation provider %s due small time since last sync", fedProvider.getDisplayName());
}
} catch (Throwable t) {
logger.errorDuringFullUserSync(t);
@@ -199,7 +199,7 @@
if (shouldPerformSync) {
syncChangedUsers(sessionFactory, realmId, fedProvider);
} else {
logger.infof("Ignored periodic changed-users sync with federation provider %s due small time since last sync", fedProvider.getDisplayName());
logger.debugf("Ignored periodic changed-users sync with federation provider %s due small time since last sync", fedProvider.getDisplayName());
}
} catch (Throwable t) {
logger.errorDuringChangedUserSync(t);
@@ -227,7 +227,7 @@
// Executed once it receives notification that some UserFederationProvider was removed
protected void removePeriodicSyncForProvider(TimerProvider timer, UserFederationProviderModel fedProvider) {
logger.infof("Removing periodic sync for provider %s", fedProvider.getDisplayName());
logger.debugf("Removing periodic sync for provider %s", fedProvider.getDisplayName());
timer.cancelTask(fedProvider.getId() + "-FULL");
timer.cancelTask(fedProvider.getId() + "-CHANGED");
}

View file

@@ -100,7 +100,7 @@ public class UserSessionInitializerTest {
// Clear ispn cache to ensure initializerState is removed as well
InfinispanConnectionProvider infinispan = session.getProvider(InfinispanConnectionProvider.class);
infinispan.getCache(InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME).clear();
infinispan.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME).clear();
resetSession();

View file

@@ -62,6 +62,7 @@ public class KeycloakServerDeploymentProcessor implements DeploymentUnitProcesso
st.addDependency(cacheContainerService.append("sessions"));
st.addDependency(cacheContainerService.append("offlineSessions"));
st.addDependency(cacheContainerService.append("loginFailures"));
st.addDependency(cacheContainerService.append("work"));
st.addDependency(cacheContainerService.append("realmVersions"));
}
}

View file

@@ -30,6 +30,7 @@
<local-cache name="sessions"/>
<local-cache name="offlineSessions"/>
<local-cache name="loginFailures"/>
<local-cache name="work"/>
<local-cache name="realmVersions">
<transaction mode="BATCH" locking="PESSIMISTIC"/>
</local-cache>
@@ -90,6 +91,7 @@
<distributed-cache name="sessions" mode="SYNC" owners="1"/>
<distributed-cache name="offlineSessions" mode="SYNC" owners="1"/>
<distributed-cache name="loginFailures" mode="SYNC" owners="1"/>
<replicated-cache name="work" mode="SYNC" />
<local-cache name="realmVersions">
<transaction mode="BATCH" locking="PESSIMISTIC"/>
</local-cache>