Merge pull request #4357 from mposolda/KEYCLOAK-4898-crossdc-userSessions-rebased3
KEYCLOAK-4187 Added UserSession support for cross-dc
This commit is contained in:
commit
ca9956c36b
140 changed files with 7071 additions and 1187 deletions
|
@ -18,6 +18,7 @@
|
|||
package org.keycloak.storage.ldap.mappers;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.AbstractKeycloakTransaction;
|
||||
import org.keycloak.models.KeycloakTransaction;
|
||||
import org.keycloak.storage.ldap.LDAPStorageProvider;
|
||||
import org.keycloak.storage.ldap.idm.model.LDAPObject;
|
||||
|
@ -25,12 +26,10 @@ import org.keycloak.storage.ldap.idm.model.LDAPObject;
|
|||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class LDAPTransaction implements KeycloakTransaction {
|
||||
public class LDAPTransaction extends AbstractKeycloakTransaction {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(LDAPTransaction.class);
|
||||
|
||||
protected TransactionState state = TransactionState.NOT_STARTED;
|
||||
|
||||
private final LDAPStorageProvider ldapProvider;
|
||||
private final LDAPObject ldapUser;
|
||||
|
||||
|
@ -39,57 +38,21 @@ public class LDAPTransaction implements KeycloakTransaction {
|
|||
this.ldapUser = ldapUser;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin() {
|
||||
if (state != TransactionState.NOT_STARTED) {
|
||||
throw new IllegalStateException("Transaction already started");
|
||||
}
|
||||
|
||||
state = TransactionState.STARTED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void commit() {
|
||||
if (state != TransactionState.STARTED) {
|
||||
throw new IllegalStateException("Transaction in illegal state for commit: " + state);
|
||||
}
|
||||
|
||||
protected void commitImpl() {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Transaction commit! Updating LDAP attributes for object " + ldapUser.getDn().toString() + ", attributes: " + ldapUser.getAttributes());
|
||||
}
|
||||
|
||||
ldapProvider.getLdapIdentityStore().update(ldapUser);
|
||||
state = TransactionState.FINISHED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rollback() {
|
||||
if (state != TransactionState.STARTED && state != TransactionState.ROLLBACK_ONLY) {
|
||||
throw new IllegalStateException("Transaction in illegal state for rollback: " + state);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void rollbackImpl() {
|
||||
logger.warn("Transaction rollback! Ignoring LDAP updates for object " + ldapUser.getDn().toString());
|
||||
state = TransactionState.FINISHED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRollbackOnly() {
|
||||
state = TransactionState.ROLLBACK_ONLY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean getRollbackOnly() {
|
||||
return state == TransactionState.ROLLBACK_ONLY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isActive() {
|
||||
return state == TransactionState.STARTED || state == TransactionState.ROLLBACK_ONLY;
|
||||
}
|
||||
|
||||
|
||||
protected enum TransactionState {
|
||||
NOT_STARTED, STARTED, ROLLBACK_ONLY, FINISHED
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ public abstract class TxAwareLDAPUserModelDelegate extends UserModelDelegate {
|
|||
|
||||
protected void ensureTransactionStarted() {
|
||||
LDAPTransaction transaction = provider.getUserManager().getTransaction(getId());
|
||||
if (transaction.state == LDAPTransaction.TransactionState.NOT_STARTED) {
|
||||
if (transaction.getState() == LDAPTransaction.TransactionState.NOT_STARTED) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Starting and enlisting transaction for object " + ldapUser.getDn().toString());
|
||||
}
|
||||
|
|
|
@ -52,6 +52,12 @@ abstract class CrossDCAwareCacheFactory {
|
|||
// For cross-DC scenario, we need to return underlying remoteCache for atomic operations to work properly
|
||||
RemoteStore remoteStore = remoteStores.iterator().next();
|
||||
RemoteCache remoteCache = remoteStore.getRemoteCache();
|
||||
|
||||
if (remoteCache == null) {
|
||||
String cacheName = remoteStore.getConfiguration().remoteCacheName();
|
||||
throw new IllegalStateException("Remote cache '" + cacheName + "' is not available.");
|
||||
}
|
||||
|
||||
return new RemoteCacheWrapperFactory(remoteCache);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,11 @@ import org.keycloak.cluster.ExecutionResult;
|
|||
import org.keycloak.common.util.Time;
|
||||
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.FutureTask;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
|
@ -43,11 +48,14 @@ public class InfinispanClusterProvider implements ClusterProvider {
|
|||
private final CrossDCAwareCacheFactory crossDCAwareCacheFactory;
|
||||
private final InfinispanNotificationsManager notificationsManager; // Just to extract notifications related stuff to separate class
|
||||
|
||||
public InfinispanClusterProvider(int clusterStartupTime, String myAddress, CrossDCAwareCacheFactory crossDCAwareCacheFactory, InfinispanNotificationsManager notificationsManager) {
|
||||
private final ExecutorService localExecutor;
|
||||
|
||||
public InfinispanClusterProvider(int clusterStartupTime, String myAddress, CrossDCAwareCacheFactory crossDCAwareCacheFactory, InfinispanNotificationsManager notificationsManager, ExecutorService localExecutor) {
|
||||
this.myAddress = myAddress;
|
||||
this.clusterStartupTime = clusterStartupTime;
|
||||
this.crossDCAwareCacheFactory = crossDCAwareCacheFactory;
|
||||
this.notificationsManager = notificationsManager;
|
||||
this.localExecutor = localExecutor;
|
||||
}
|
||||
|
||||
|
||||
|
@ -85,6 +93,34 @@ public class InfinispanClusterProvider implements ClusterProvider {
|
|||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Future<Boolean> executeIfNotExecutedAsync(String taskKey, int taskTimeoutInSeconds, Callable task) {
|
||||
TaskCallback newCallback = new TaskCallback();
|
||||
TaskCallback callback = this.notificationsManager.registerTaskCallback(TASK_KEY_PREFIX + taskKey, newCallback);
|
||||
|
||||
// We successfully submitted our task
|
||||
if (newCallback == callback) {
|
||||
Callable<Boolean> wrappedTask = () -> {
|
||||
boolean executed = executeIfNotExecuted(taskKey, taskTimeoutInSeconds, task).isExecuted();
|
||||
|
||||
if (!executed) {
|
||||
logger.infof("Task already in progress on other cluster node. Will wait until it's finished");
|
||||
}
|
||||
|
||||
callback.getTaskCompletedLatch().await(taskTimeoutInSeconds, TimeUnit.SECONDS);
|
||||
return callback.isSuccess();
|
||||
};
|
||||
|
||||
Future<Boolean> future = localExecutor.submit(wrappedTask);
|
||||
callback.setFuture(future);
|
||||
} else {
|
||||
logger.infof("Task already in progress on this cluster node. Will wait until it's finished");
|
||||
}
|
||||
|
||||
return callback.getFuture();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void registerListener(String taskKey, ClusterListener task) {
|
||||
this.notificationsManager.registerListener(taskKey, task);
|
||||
|
@ -92,11 +128,10 @@ public class InfinispanClusterProvider implements ClusterProvider {
|
|||
|
||||
|
||||
@Override
|
||||
public void notify(String taskKey, ClusterEvent event, boolean ignoreSender) {
|
||||
this.notificationsManager.notify(taskKey, event, ignoreSender);
|
||||
public void notify(String taskKey, ClusterEvent event, boolean ignoreSender, DCNotify dcNotify) {
|
||||
this.notificationsManager.notify(taskKey, event, ignoreSender, dcNotify);
|
||||
}
|
||||
|
||||
|
||||
private LockEntry createLockEntry() {
|
||||
LockEntry lock = new LockEntry();
|
||||
lock.setNode(myAddress);
|
||||
|
|
|
@ -35,12 +35,15 @@ import org.keycloak.common.util.Time;
|
|||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -62,17 +65,18 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory
|
|||
// Ensure that atomic operations (like putIfAbsent) must work correctly in any of: non-clustered, clustered or cross-Data-Center (cross-DC) setups
|
||||
private CrossDCAwareCacheFactory crossDCAwareCacheFactory;
|
||||
|
||||
private String myAddress;
|
||||
|
||||
private int clusterStartupTime;
|
||||
|
||||
// Just to extract notifications related stuff to separate class
|
||||
private InfinispanNotificationsManager notificationsManager;
|
||||
|
||||
private ExecutorService localExecutor = Executors.newCachedThreadPool();
|
||||
|
||||
@Override
|
||||
public ClusterProvider create(KeycloakSession session) {
|
||||
lazyInit(session);
|
||||
return new InfinispanClusterProvider(clusterStartupTime, myAddress, crossDCAwareCacheFactory, notificationsManager);
|
||||
String myAddress = InfinispanUtil.getMyAddress(session);
|
||||
return new InfinispanClusterProvider(clusterStartupTime, myAddress, crossDCAwareCacheFactory, notificationsManager, localExecutor);
|
||||
}
|
||||
|
||||
private void lazyInit(KeycloakSession session) {
|
||||
|
@ -83,33 +87,23 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory
|
|||
workCache = ispnConnections.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
|
||||
|
||||
workCache.getCacheManager().addListener(new ViewChangeListener());
|
||||
initMyAddress();
|
||||
|
||||
Set<RemoteStore> remoteStores = getRemoteStores();
|
||||
// See if we have RemoteStore (external JDG) configured for cross-Data-Center scenario
|
||||
Set<RemoteStore> remoteStores = InfinispanUtil.getRemoteStores(workCache);
|
||||
crossDCAwareCacheFactory = CrossDCAwareCacheFactory.getFactory(workCache, remoteStores);
|
||||
|
||||
clusterStartupTime = initClusterStartupTime(session);
|
||||
|
||||
notificationsManager = InfinispanNotificationsManager.create(workCache, myAddress, remoteStores);
|
||||
String myAddress = InfinispanUtil.getMyAddress(session);
|
||||
String mySite = InfinispanUtil.getMySite(session);
|
||||
|
||||
notificationsManager = InfinispanNotificationsManager.create(workCache, myAddress, mySite, remoteStores);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// See if we have RemoteStore (external JDG) configured for cross-Data-Center scenario
|
||||
private Set<RemoteStore> getRemoteStores() {
|
||||
return workCache.getAdvancedCache().getComponentRegistry().getComponent(PersistenceManager.class).getStores(RemoteStore.class);
|
||||
}
|
||||
|
||||
|
||||
protected void initMyAddress() {
|
||||
Transport transport = workCache.getCacheManager().getTransport();
|
||||
this.myAddress = transport == null ? HostUtils.getHostName() + "-" + workCache.hashCode() : transport.getAddress().toString();
|
||||
logger.debugf("My address: %s", this.myAddress);
|
||||
}
|
||||
|
||||
|
||||
protected int initClusterStartupTime(KeycloakSession session) {
|
||||
Integer existingClusterStartTime = (Integer) crossDCAwareCacheFactory.getCache().get(InfinispanClusterProvider.CLUSTER_STARTUP_TIME_KEY);
|
||||
if (existingClusterStartTime != null) {
|
||||
|
@ -201,6 +195,10 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory
|
|||
if (logger.isTraceEnabled()) {
|
||||
logger.tracef("Removing task %s due it's node left cluster", rem);
|
||||
}
|
||||
|
||||
// If we have task in progress, it needs to be notified
|
||||
notificationsManager.taskFinished(rem, false);
|
||||
|
||||
cache.remove(rem);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,31 +20,38 @@ package org.keycloak.cluster.infinispan;
|
|||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryExpired;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryRemoved;
|
||||
import org.infinispan.client.hotrod.annotation.ClientListener;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryExpiredEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryRemovedEvent;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.marshall.core.MarshalledEntry;
|
||||
import org.infinispan.notifications.Listener;
|
||||
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
|
||||
import org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired;
|
||||
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
|
||||
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
|
||||
import org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent;
|
||||
import org.infinispan.notifications.cachelistener.event.CacheEntryExpiredEvent;
|
||||
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
|
||||
import org.infinispan.persistence.manager.PersistenceManager;
|
||||
import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;
|
||||
import org.infinispan.persistence.remote.RemoteStore;
|
||||
import org.infinispan.remoting.transport.Transport;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
import org.keycloak.cluster.ClusterListener;
|
||||
import org.keycloak.cluster.ClusterProvider;
|
||||
import org.keycloak.common.util.HostUtils;
|
||||
import org.keycloak.common.util.MultivaluedHashMap;
|
||||
|
||||
/**
|
||||
|
@ -58,20 +65,25 @@ public class InfinispanNotificationsManager {
|
|||
|
||||
private final MultivaluedHashMap<String, ClusterListener> listeners = new MultivaluedHashMap<>();
|
||||
|
||||
private final ConcurrentMap<String, TaskCallback> taskCallbacks = new ConcurrentHashMap<>();
|
||||
|
||||
private final Cache<String, Serializable> workCache;
|
||||
|
||||
private final String myAddress;
|
||||
|
||||
private final String mySite;
|
||||
|
||||
protected InfinispanNotificationsManager(Cache<String, Serializable> workCache, String myAddress) {
|
||||
|
||||
protected InfinispanNotificationsManager(Cache<String, Serializable> workCache, String myAddress, String mySite) {
|
||||
this.workCache = workCache;
|
||||
this.myAddress = myAddress;
|
||||
this.mySite = mySite;
|
||||
}
|
||||
|
||||
|
||||
// Create and init manager including all listeners etc
|
||||
public static InfinispanNotificationsManager create(Cache<String, Serializable> workCache, String myAddress, Set<RemoteStore> remoteStores) {
|
||||
InfinispanNotificationsManager manager = new InfinispanNotificationsManager(workCache, myAddress);
|
||||
public static InfinispanNotificationsManager create(Cache<String, Serializable> workCache, String myAddress, String mySite, Set<RemoteStore> remoteStores) {
|
||||
InfinispanNotificationsManager manager = new InfinispanNotificationsManager(workCache, myAddress, mySite);
|
||||
|
||||
// We need CacheEntryListener just if we don't have remoteStore. With remoteStore will be all cluster nodes notified anyway from HotRod listener
|
||||
if (remoteStores.isEmpty()) {
|
||||
|
@ -85,6 +97,10 @@ public class InfinispanNotificationsManager {
|
|||
|
||||
logger.debugf("Added listener for HotRod remoteStore cache: %s", remoteCache.getName());
|
||||
}
|
||||
|
||||
if (mySite == null) {
|
||||
throw new IllegalStateException("Multiple datacenters available, but site name is not configured! Check your configuration");
|
||||
}
|
||||
}
|
||||
|
||||
return manager;
|
||||
|
@ -96,19 +112,37 @@ public class InfinispanNotificationsManager {
|
|||
}
|
||||
|
||||
|
||||
void notify(String taskKey, ClusterEvent event, boolean ignoreSender) {
|
||||
TaskCallback registerTaskCallback(String taskKey, TaskCallback callback) {
|
||||
TaskCallback existing = taskCallbacks.putIfAbsent(taskKey, callback);
|
||||
|
||||
if (existing != null) {
|
||||
return existing;
|
||||
} else {
|
||||
return callback;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void notify(String taskKey, ClusterEvent event, boolean ignoreSender, ClusterProvider.DCNotify dcNotify) {
|
||||
WrapperClusterEvent wrappedEvent = new WrapperClusterEvent();
|
||||
wrappedEvent.setEventKey(taskKey);
|
||||
wrappedEvent.setDelegateEvent(event);
|
||||
wrappedEvent.setIgnoreSender(ignoreSender);
|
||||
wrappedEvent.setIgnoreSenderSite(dcNotify == ClusterProvider.DCNotify.ALL_BUT_LOCAL_DC);
|
||||
wrappedEvent.setSender(myAddress);
|
||||
wrappedEvent.setSenderSite(mySite);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.tracef("Sending event %s: %s", taskKey, event);
|
||||
logger.tracef("Sending event: %s", event);
|
||||
}
|
||||
|
||||
Flag[] flags = dcNotify == ClusterProvider.DCNotify.LOCAL_DC_ONLY
|
||||
? new Flag[] { Flag.IGNORE_RETURN_VALUES, Flag.SKIP_CACHE_STORE }
|
||||
: new Flag[] { Flag.IGNORE_RETURN_VALUES };
|
||||
|
||||
// Put the value to the cache to notify listeners on all the nodes
|
||||
workCache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES)
|
||||
.put(taskKey, wrappedEvent, 120, TimeUnit.SECONDS);
|
||||
workCache.getAdvancedCache().withFlags(flags)
|
||||
.put(UUID.randomUUID().toString(), wrappedEvent, 120, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
|
||||
|
@ -124,6 +158,12 @@ public class InfinispanNotificationsManager {
|
|||
public void cacheEntryModified(CacheEntryModifiedEvent<String, Serializable> event) {
|
||||
eventReceived(event.getKey(), event.getValue());
|
||||
}
|
||||
|
||||
@CacheEntryRemoved
|
||||
public void cacheEntryRemoved(CacheEntryRemovedEvent<String, Serializable> event) {
|
||||
taskFinished(event.getKey(), true);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -150,6 +190,14 @@ public class InfinispanNotificationsManager {
|
|||
hotrodEventReceived(key);
|
||||
}
|
||||
|
||||
|
||||
@ClientCacheEntryRemoved
|
||||
public void removed(ClientCacheEntryRemovedEvent event) {
|
||||
String key = event.getKey().toString();
|
||||
taskFinished(key, true);
|
||||
}
|
||||
|
||||
|
||||
private void hotrodEventReceived(String key) {
|
||||
// TODO: Look at CacheEventConverter stuff to possibly include value in the event and avoid additional remoteCache request
|
||||
Object value = workCache.get(key);
|
||||
|
@ -171,24 +219,39 @@ public class InfinispanNotificationsManager {
|
|||
}
|
||||
}
|
||||
|
||||
if (event.isIgnoreSenderSite()) {
|
||||
if (this.mySite != null && this.mySite.equals(event.getSender())) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
String eventKey = event.getEventKey();
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.tracef("Received event %s: %s", key, event);
|
||||
logger.tracef("Received event: %s", event);
|
||||
}
|
||||
|
||||
ClusterEvent wrappedEvent = event.getDelegateEvent();
|
||||
|
||||
List<ClusterListener> myListeners = listeners.get(key);
|
||||
if (myListeners != null) {
|
||||
for (ClusterListener listener : myListeners) {
|
||||
listener.eventReceived(wrappedEvent);
|
||||
}
|
||||
}
|
||||
|
||||
myListeners = listeners.get(ClusterProvider.ALL);
|
||||
List<ClusterListener> myListeners = listeners.get(eventKey);
|
||||
if (myListeners != null) {
|
||||
for (ClusterListener listener : myListeners) {
|
||||
listener.eventReceived(wrappedEvent);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void taskFinished(String taskKey, boolean success) {
|
||||
TaskCallback callback = taskCallbacks.remove(taskKey);
|
||||
|
||||
if (callback != null) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debugf("Finished task '%s' with '%b'", taskKey, success);
|
||||
}
|
||||
callback.setSuccess(success);
|
||||
callback.getTaskCompletedLatch().countDown();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.cluster.infinispan;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
class TaskCallback {
|
||||
|
||||
protected static final Logger logger = Logger.getLogger(TaskCallback.class);
|
||||
|
||||
static final int LATCH_TIMEOUT_MS = 10000;
|
||||
|
||||
private volatile boolean success;
|
||||
|
||||
private volatile Future<Boolean> future;
|
||||
|
||||
private final CountDownLatch taskCompletedLatch = new CountDownLatch(1);
|
||||
private final CountDownLatch futureAvailableLatch = new CountDownLatch(1);
|
||||
|
||||
|
||||
public void setSuccess(boolean success) {
|
||||
this.success = success;
|
||||
}
|
||||
|
||||
public boolean isSuccess() {
|
||||
return success;
|
||||
}
|
||||
|
||||
public void setFuture(Future<Boolean> future) {
|
||||
this.future = future;
|
||||
this.futureAvailableLatch.countDown();
|
||||
}
|
||||
|
||||
|
||||
public Future<Boolean> getFuture() {
|
||||
try {
|
||||
this.futureAvailableLatch.await(LATCH_TIMEOUT_MS, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException ie) {
|
||||
logger.error("Interrupted thread!");
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
|
||||
return future;
|
||||
}
|
||||
|
||||
|
||||
public CountDownLatch getTaskCompletedLatch() {
|
||||
return taskCompletedLatch;
|
||||
}
|
||||
}
|
|
@ -24,10 +24,21 @@ import org.keycloak.cluster.ClusterEvent;
|
|||
*/
|
||||
public class WrapperClusterEvent implements ClusterEvent {
|
||||
|
||||
private String sender; // will be null in non-clustered environment
|
||||
private String eventKey;
|
||||
private String sender;
|
||||
private String senderSite;
|
||||
private boolean ignoreSender;
|
||||
private boolean ignoreSenderSite;
|
||||
private ClusterEvent delegateEvent;
|
||||
|
||||
public String getEventKey() {
|
||||
return eventKey;
|
||||
}
|
||||
|
||||
public void setEventKey(String eventKey) {
|
||||
this.eventKey = eventKey;
|
||||
}
|
||||
|
||||
public String getSender() {
|
||||
return sender;
|
||||
}
|
||||
|
@ -36,6 +47,14 @@ public class WrapperClusterEvent implements ClusterEvent {
|
|||
this.sender = sender;
|
||||
}
|
||||
|
||||
public String getSenderSite() {
|
||||
return senderSite;
|
||||
}
|
||||
|
||||
public void setSenderSite(String senderSite) {
|
||||
this.senderSite = senderSite;
|
||||
}
|
||||
|
||||
public boolean isIgnoreSender() {
|
||||
return ignoreSender;
|
||||
}
|
||||
|
@ -44,6 +63,14 @@ public class WrapperClusterEvent implements ClusterEvent {
|
|||
this.ignoreSender = ignoreSender;
|
||||
}
|
||||
|
||||
public boolean isIgnoreSenderSite() {
|
||||
return ignoreSenderSite;
|
||||
}
|
||||
|
||||
public void setIgnoreSenderSite(boolean ignoreSenderSite) {
|
||||
this.ignoreSenderSite = ignoreSenderSite;
|
||||
}
|
||||
|
||||
public ClusterEvent getDelegateEvent() {
|
||||
return delegateEvent;
|
||||
}
|
||||
|
@ -54,6 +81,6 @@ public class WrapperClusterEvent implements ClusterEvent {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("WrapperClusterEvent [ sender=%s, delegateEvent=%s ]", sender, delegateEvent.toString());
|
||||
return String.format("WrapperClusterEvent [ eventKey=%s, sender=%s, senderSite=%s, delegateEvent=%s ]", eventKey, sender, senderSite, delegateEvent.toString());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,9 +26,13 @@ import org.infinispan.manager.EmbeddedCacheManager;
|
|||
public class DefaultInfinispanConnectionProvider implements InfinispanConnectionProvider {
|
||||
|
||||
private EmbeddedCacheManager cacheManager;
|
||||
private final String siteName;
|
||||
private final String nodeName;
|
||||
|
||||
public DefaultInfinispanConnectionProvider(EmbeddedCacheManager cacheManager) {
|
||||
public DefaultInfinispanConnectionProvider(EmbeddedCacheManager cacheManager, String nodeName, String siteName) {
|
||||
this.cacheManager = cacheManager;
|
||||
this.nodeName = nodeName;
|
||||
this.siteName = siteName;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -36,6 +40,16 @@ public class DefaultInfinispanConnectionProvider implements InfinispanConnection
|
|||
return cacheManager.getCache(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getNodeName() {
|
||||
return nodeName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSiteName() {
|
||||
return siteName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
package org.keycloak.connections.infinispan;
|
||||
|
||||
import java.security.SecureRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.infinispan.commons.util.FileLookup;
|
||||
|
@ -30,6 +31,7 @@ import org.infinispan.eviction.EvictionType;
|
|||
import org.infinispan.manager.DefaultCacheManager;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
|
||||
import org.infinispan.remoting.transport.Transport;
|
||||
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
|
||||
import org.infinispan.transaction.LockingMode;
|
||||
import org.infinispan.transaction.TransactionMode;
|
||||
|
@ -38,8 +40,12 @@ import org.jboss.logging.Logger;
|
|||
import org.jgroups.JChannel;
|
||||
import org.keycloak.Config;
|
||||
import org.keycloak.cluster.infinispan.KeycloakHotRodMarshallerFactory;
|
||||
import org.keycloak.common.util.HostUtils;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.KcRemoteStoreConfigurationBuilder;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
|
||||
import javax.naming.InitialContext;
|
||||
|
||||
|
@ -56,11 +62,15 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
|
||||
protected boolean containerManaged;
|
||||
|
||||
private String nodeName;
|
||||
|
||||
private String siteName;
|
||||
|
||||
@Override
|
||||
public InfinispanConnectionProvider create(KeycloakSession session) {
|
||||
lazyInit();
|
||||
|
||||
return new DefaultInfinispanConnectionProvider(cacheManager);
|
||||
return new DefaultInfinispanConnectionProvider(cacheManager, nodeName, siteName);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -96,6 +106,8 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
} else {
|
||||
initEmbedded();
|
||||
}
|
||||
|
||||
logger.infof("Node name: %s, Site name: %s", nodeName, siteName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -134,7 +146,20 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
cacheManager.defineConfiguration(InfinispanConnectionProvider.AUTHORIZATION_REVISIONS_CACHE_NAME, getRevisionCacheConfig(authzRevisionsMaxEntries));
|
||||
cacheManager.getCache(InfinispanConnectionProvider.AUTHORIZATION_REVISIONS_CACHE_NAME, true);
|
||||
|
||||
|
||||
Transport transport = cacheManager.getTransport();
|
||||
if (transport != null) {
|
||||
this.nodeName = transport.getAddress().toString();
|
||||
this.siteName = cacheManager.getCacheManagerConfiguration().transport().siteId();
|
||||
if (this.siteName == null) {
|
||||
this.siteName = System.getProperty(InfinispanConnectionProvider.JBOSS_SITE_NAME);
|
||||
}
|
||||
} else {
|
||||
this.nodeName = System.getProperty(InfinispanConnectionProvider.JBOSS_NODE_NAME);
|
||||
this.siteName = System.getProperty(InfinispanConnectionProvider.JBOSS_SITE_NAME);
|
||||
}
|
||||
if (this.nodeName == null || this.nodeName.equals("localhost")) {
|
||||
this.nodeName = generateNodeName();
|
||||
}
|
||||
|
||||
logger.debugv("Using container managed Infinispan cache container, lookup={1}", cacheContainerLookup);
|
||||
} catch (Exception e) {
|
||||
|
@ -152,13 +177,27 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
boolean async = config.getBoolean("async", false);
|
||||
boolean allowDuplicateJMXDomains = config.getBoolean("allowDuplicateJMXDomains", true);
|
||||
|
||||
this.nodeName = config.get("nodeName", System.getProperty(InfinispanConnectionProvider.JBOSS_NODE_NAME));
|
||||
if (this.nodeName != null && this.nodeName.isEmpty()) {
|
||||
this.nodeName = null;
|
||||
}
|
||||
|
||||
this.siteName = config.get("siteName", System.getProperty(InfinispanConnectionProvider.JBOSS_SITE_NAME));
|
||||
if (this.siteName != null && this.siteName.isEmpty()) {
|
||||
this.siteName = null;
|
||||
}
|
||||
|
||||
if (clustered) {
|
||||
String nodeName = config.get("nodeName", System.getProperty(InfinispanConnectionProvider.JBOSS_NODE_NAME));
|
||||
String jgroupsUdpMcastAddr = config.get("jgroupsUdpMcastAddr", System.getProperty(InfinispanConnectionProvider.JGROUPS_UDP_MCAST_ADDR));
|
||||
configureTransport(gcb, nodeName, jgroupsUdpMcastAddr);
|
||||
configureTransport(gcb, nodeName, siteName, jgroupsUdpMcastAddr);
|
||||
gcb.globalJmxStatistics()
|
||||
.jmxDomain(InfinispanConnectionProvider.JMX_DOMAIN + "-" + nodeName);
|
||||
} else {
|
||||
if (nodeName == null) {
|
||||
nodeName = generateNodeName();
|
||||
}
|
||||
}
|
||||
|
||||
gcb.globalJmxStatistics()
|
||||
.allowDuplicateDomains(allowDuplicateJMXDomains)
|
||||
.enable();
|
||||
|
@ -166,6 +205,10 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
cacheManager = new DefaultCacheManager(gcb.build());
|
||||
containerManaged = false;
|
||||
|
||||
if (cacheManager.getTransport() != null) {
|
||||
nodeName = cacheManager.getTransport().getAddress().toString();
|
||||
}
|
||||
|
||||
logger.debug("Started embedded Infinispan cache container");
|
||||
|
||||
ConfigurationBuilder modelCacheConfigBuilder = new ConfigurationBuilder();
|
||||
|
@ -198,11 +241,29 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
.build();
|
||||
}
|
||||
|
||||
// Base configuration doesn't contain any remote stores
|
||||
Configuration sessionCacheConfigurationBase = sessionConfigBuilder.build();
|
||||
|
||||
boolean jdgEnabled = config.getBoolean("remoteStoreEnabled", false);
|
||||
|
||||
if (jdgEnabled) {
|
||||
sessionConfigBuilder = new ConfigurationBuilder();
|
||||
sessionConfigBuilder.read(sessionCacheConfigurationBase);
|
||||
configureRemoteCacheStore(sessionConfigBuilder, async, InfinispanConnectionProvider.SESSION_CACHE_NAME, KcRemoteStoreConfigurationBuilder.class);
|
||||
}
|
||||
Configuration sessionCacheConfiguration = sessionConfigBuilder.build();
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.SESSION_CACHE_NAME, sessionCacheConfiguration);
|
||||
|
||||
if (jdgEnabled) {
|
||||
sessionConfigBuilder = new ConfigurationBuilder();
|
||||
sessionConfigBuilder.read(sessionCacheConfigurationBase);
|
||||
configureRemoteCacheStore(sessionConfigBuilder, async, InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME, KcRemoteStoreConfigurationBuilder.class);
|
||||
}
|
||||
sessionCacheConfiguration = sessionConfigBuilder.build();
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME, sessionCacheConfiguration);
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME, sessionCacheConfiguration);
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.AUTHENTICATION_SESSIONS_CACHE_NAME, sessionCacheConfiguration);
|
||||
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME, sessionCacheConfigurationBase);
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.AUTHENTICATION_SESSIONS_CACHE_NAME, sessionCacheConfigurationBase);
|
||||
|
||||
// Retrieve caches to enforce rebalance
|
||||
cacheManager.getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME, true);
|
||||
|
@ -215,9 +276,8 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
replicationConfigBuilder.clustering().cacheMode(async ? CacheMode.REPL_ASYNC : CacheMode.REPL_SYNC);
|
||||
}
|
||||
|
||||
boolean jdgEnabled = config.getBoolean("remoteStoreEnabled", false);
|
||||
if (jdgEnabled) {
|
||||
configureRemoteCacheStore(replicationConfigBuilder, async);
|
||||
configureRemoteCacheStore(replicationConfigBuilder, async, InfinispanConnectionProvider.WORK_CACHE_NAME, RemoteStoreConfigurationBuilder.class);
|
||||
}
|
||||
|
||||
Configuration replicationEvictionCacheConfiguration = replicationConfigBuilder.build();
|
||||
|
@ -267,6 +327,10 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
cacheManager.getCache(InfinispanConnectionProvider.AUTHORIZATION_REVISIONS_CACHE_NAME, true);
|
||||
}
|
||||
|
||||
protected String generateNodeName() {
|
||||
return InfinispanConnectionProvider.NODE_PREFIX + new SecureRandom().nextInt(1000000);
|
||||
}
|
||||
|
||||
private Configuration getRevisionCacheConfig(long maxEntries) {
|
||||
ConfigurationBuilder cb = new ConfigurationBuilder();
|
||||
cb.invocationBatching().enable().transaction().transactionMode(TransactionMode.TRANSACTIONAL);
|
||||
|
@ -281,19 +345,19 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
}
|
||||
|
||||
// Used for cross-data centers scenario. Usually integration with external JDG server, which itself handles communication between DCs.
|
||||
private void configureRemoteCacheStore(ConfigurationBuilder builder, boolean async) {
|
||||
private void configureRemoteCacheStore(ConfigurationBuilder builder, boolean async, String cacheName, Class<? extends RemoteStoreConfigurationBuilder> configBuilderClass) {
|
||||
String jdgServer = config.get("remoteStoreServer", "localhost");
|
||||
Integer jdgPort = config.getInt("remoteStorePort", 11222);
|
||||
|
||||
builder.persistence()
|
||||
.passivation(false)
|
||||
.addStore(RemoteStoreConfigurationBuilder.class)
|
||||
.addStore(configBuilderClass)
|
||||
.fetchPersistentState(false)
|
||||
.ignoreModifications(false)
|
||||
.purgeOnStartup(false)
|
||||
.preload(false)
|
||||
.shared(true)
|
||||
.remoteCacheName(InfinispanConnectionProvider.WORK_CACHE_NAME)
|
||||
.remoteCacheName(cacheName)
|
||||
.rawValues(true)
|
||||
.forceReturnValues(false)
|
||||
.marshaller(KeycloakHotRodMarshallerFactory.class.getName())
|
||||
|
@ -355,7 +419,7 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
|
||||
private static final Object CHANNEL_INIT_SYNCHRONIZER = new Object();
|
||||
|
||||
protected void configureTransport(GlobalConfigurationBuilder gcb, String nodeName, String jgroupsUdpMcastAddr) {
|
||||
protected void configureTransport(GlobalConfigurationBuilder gcb, String nodeName, String siteName, String jgroupsUdpMcastAddr) {
|
||||
if (nodeName == null) {
|
||||
gcb.transport().defaultTransport();
|
||||
} else {
|
||||
|
@ -376,6 +440,7 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
|
|||
|
||||
gcb.transport()
|
||||
.nodeName(nodeName)
|
||||
.siteId(siteName)
|
||||
.transport(transport)
|
||||
.globalJmxStatistics()
|
||||
.jmxDomain(InfinispanConnectionProvider.JMX_DOMAIN + "-" + nodeName)
|
||||
|
|
|
@ -55,8 +55,25 @@ public interface InfinispanConnectionProvider extends Provider {
|
|||
String JBOSS_NODE_NAME = "jboss.node.name";
|
||||
String JGROUPS_UDP_MCAST_ADDR = "jgroups.udp.mcast_addr";
|
||||
|
||||
// TODO This property is not in Wildfly. Check if corresponding property in Wildfly exists
|
||||
String JBOSS_SITE_NAME = "jboss.site.name";
|
||||
|
||||
String JMX_DOMAIN = "jboss.datagrid-infinispan";
|
||||
|
||||
// Constant used as the prefix of the current node if "jboss.node.name" is not configured
|
||||
String NODE_PREFIX = "node_";
|
||||
|
||||
<K, V> Cache<K, V> getCache(String name);
|
||||
|
||||
/**
|
||||
* @return Address of current node in cluster. In non-cluster environment, it returns some other non-null value (eg. hostname with some random value like "host-123456" )
|
||||
*/
|
||||
String getNodeName();
|
||||
|
||||
/**
|
||||
*
|
||||
* @return siteName or null if we're not in environment with multiple sites (data centers)
|
||||
*/
|
||||
String getSiteName();
|
||||
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ public class InfinispanPublicKeyStorageProvider implements PublicKeyStorageProvi
|
|||
public void clearCache() {
|
||||
keys.clear();
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
cluster.notify(InfinispanPublicKeyStorageProviderFactory.KEYS_CLEAR_CACHE_EVENTS, new ClearCacheEvent(), true);
|
||||
cluster.notify(InfinispanPublicKeyStorageProviderFactory.KEYS_CLEAR_CACHE_EVENTS, new ClearCacheEvent(), true, ClusterProvider.DCNotify.ALL_DCS);
|
||||
}
|
||||
|
||||
|
||||
|
@ -122,7 +122,7 @@ public class InfinispanPublicKeyStorageProvider implements PublicKeyStorageProvi
|
|||
|
||||
for (String cacheKey : invalidations) {
|
||||
keys.remove(cacheKey);
|
||||
cluster.notify(cacheKey, PublicKeyStorageInvalidationEvent.create(cacheKey), true);
|
||||
cluster.notify(InfinispanPublicKeyStorageProviderFactory.PUBLIC_KEY_STORAGE_INVALIDATION_EVENT, PublicKeyStorageInvalidationEvent.create(cacheKey), true, ClusterProvider.DCNotify.ALL_DCS);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -50,6 +50,8 @@ public class InfinispanPublicKeyStorageProviderFactory implements PublicKeyStora
|
|||
|
||||
public static final String KEYS_CLEAR_CACHE_EVENTS = "KEYS_CLEAR_CACHE_EVENTS";
|
||||
|
||||
public static final String PUBLIC_KEY_STORAGE_INVALIDATION_EVENT = "PUBLIC_KEY_STORAGE_INVALIDATION_EVENT";
|
||||
|
||||
private volatile Cache<String, PublicKeysEntry> keysCache;
|
||||
|
||||
private final Map<String, FutureTask<PublicKeysEntry>> tasksInProgress = new ConcurrentHashMap<>();
|
||||
|
@ -69,12 +71,10 @@ public class InfinispanPublicKeyStorageProviderFactory implements PublicKeyStora
|
|||
this.keysCache = session.getProvider(InfinispanConnectionProvider.class).getCache(InfinispanConnectionProvider.KEYS_CACHE_NAME);
|
||||
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
cluster.registerListener(ClusterProvider.ALL, (ClusterEvent event) -> {
|
||||
cluster.registerListener(PUBLIC_KEY_STORAGE_INVALIDATION_EVENT, (ClusterEvent event) -> {
|
||||
|
||||
if (event instanceof PublicKeyStorageInvalidationEvent) {
|
||||
PublicKeyStorageInvalidationEvent invalidationEvent = (PublicKeyStorageInvalidationEvent) event;
|
||||
keysCache.remove(invalidationEvent.getCacheKey());
|
||||
}
|
||||
PublicKeyStorageInvalidationEvent invalidationEvent = (PublicKeyStorageInvalidationEvent) event;
|
||||
keysCache.remove(invalidationEvent.getCacheKey());
|
||||
|
||||
});
|
||||
|
||||
|
|
|
@ -198,22 +198,15 @@ public abstract class CacheManager {
|
|||
}
|
||||
|
||||
|
||||
public void sendInvalidationEvents(KeycloakSession session, Collection<InvalidationEvent> invalidationEvents) {
|
||||
public void sendInvalidationEvents(KeycloakSession session, Collection<InvalidationEvent> invalidationEvents, String eventKey) {
|
||||
ClusterProvider clusterProvider = session.getProvider(ClusterProvider.class);
|
||||
|
||||
// Maybe add InvalidationEvent, which will be collection of all invalidationEvents? That will reduce cluster traffic even more.
|
||||
for (InvalidationEvent event : invalidationEvents) {
|
||||
clusterProvider.notify(generateEventId(event), event, true);
|
||||
clusterProvider.notify(eventKey, event, true, ClusterProvider.DCNotify.ALL_DCS);
|
||||
}
|
||||
}
|
||||
|
||||
protected String generateEventId(InvalidationEvent event) {
|
||||
return new StringBuilder(event.getId())
|
||||
.append("_")
|
||||
.append(event.hashCode())
|
||||
.toString();
|
||||
}
|
||||
|
||||
|
||||
public void invalidationEventReceived(InvalidationEvent event) {
|
||||
Set<String> invalidations = new HashSet<>();
|
||||
|
|
|
@ -38,6 +38,7 @@ public class InfinispanCacheRealmProviderFactory implements CacheRealmProviderFa
|
|||
|
||||
private static final Logger log = Logger.getLogger(InfinispanCacheRealmProviderFactory.class);
|
||||
public static final String REALM_CLEAR_CACHE_EVENTS = "REALM_CLEAR_CACHE_EVENTS";
|
||||
public static final String REALM_INVALIDATION_EVENTS = "REALM_INVALIDATION_EVENTS";
|
||||
|
||||
protected volatile RealmCacheManager realmCache;
|
||||
|
||||
|
@ -56,12 +57,11 @@ public class InfinispanCacheRealmProviderFactory implements CacheRealmProviderFa
|
|||
realmCache = new RealmCacheManager(cache, revisions);
|
||||
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
cluster.registerListener(ClusterProvider.ALL, (ClusterEvent event) -> {
|
||||
cluster.registerListener(REALM_INVALIDATION_EVENTS, (ClusterEvent event) -> {
|
||||
|
||||
InvalidationEvent invalidationEvent = (InvalidationEvent) event;
|
||||
realmCache.invalidationEventReceived(invalidationEvent);
|
||||
|
||||
if (event instanceof InvalidationEvent) {
|
||||
InvalidationEvent invalidationEvent = (InvalidationEvent) event;
|
||||
realmCache.invalidationEventReceived(invalidationEvent);
|
||||
}
|
||||
});
|
||||
|
||||
cluster.registerListener(REALM_CLEAR_CACHE_EVENTS, (ClusterEvent event) -> {
|
||||
|
|
|
@ -37,6 +37,7 @@ public class InfinispanUserCacheProviderFactory implements UserCacheProviderFact
|
|||
|
||||
private static final Logger log = Logger.getLogger(InfinispanUserCacheProviderFactory.class);
|
||||
public static final String USER_CLEAR_CACHE_EVENTS = "USER_CLEAR_CACHE_EVENTS";
|
||||
public static final String USER_INVALIDATION_EVENTS = "USER_INVALIDATION_EVENTS";
|
||||
|
||||
protected volatile UserCacheManager userCache;
|
||||
|
||||
|
@ -58,12 +59,10 @@ public class InfinispanUserCacheProviderFactory implements UserCacheProviderFact
|
|||
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
|
||||
cluster.registerListener(ClusterProvider.ALL, (ClusterEvent event) -> {
|
||||
cluster.registerListener(USER_INVALIDATION_EVENTS, (ClusterEvent event) -> {
|
||||
|
||||
if (event instanceof InvalidationEvent) {
|
||||
InvalidationEvent invalidationEvent = (InvalidationEvent) event;
|
||||
userCache.invalidationEventReceived(invalidationEvent);
|
||||
}
|
||||
InvalidationEvent invalidationEvent = (InvalidationEvent) event;
|
||||
userCache.invalidationEventReceived(invalidationEvent);
|
||||
|
||||
});
|
||||
|
||||
|
|
|
@ -95,11 +95,9 @@ public class RealmCacheManager extends CacheManager {
|
|||
|
||||
@Override
|
||||
protected void addInvalidationsFromEvent(InvalidationEvent event, Set<String> invalidations) {
|
||||
if (event instanceof RealmCacheInvalidationEvent) {
|
||||
invalidations.add(event.getId());
|
||||
invalidations.add(event.getId());
|
||||
|
||||
((RealmCacheInvalidationEvent) event).addInvalidations(this, invalidations);
|
||||
}
|
||||
((RealmCacheInvalidationEvent) event).addInvalidations(this, invalidations);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -166,7 +166,7 @@ public class RealmCacheSession implements CacheRealmProvider {
|
|||
@Override
|
||||
public void clear() {
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
cluster.notify(InfinispanCacheRealmProviderFactory.REALM_CLEAR_CACHE_EVENTS, new ClearCacheEvent(), false);
|
||||
cluster.notify(InfinispanCacheRealmProviderFactory.REALM_CLEAR_CACHE_EVENTS, new ClearCacheEvent(), false, ClusterProvider.DCNotify.ALL_DCS);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -298,7 +298,7 @@ public class RealmCacheSession implements CacheRealmProvider {
|
|||
cache.invalidateObject(id);
|
||||
}
|
||||
|
||||
cache.sendInvalidationEvents(session, invalidationEvents);
|
||||
cache.sendInvalidationEvents(session, invalidationEvents, InfinispanCacheRealmProviderFactory.REALM_INVALIDATION_EVENTS);
|
||||
}
|
||||
|
||||
private KeycloakTransaction getPrepareTransaction() {
|
||||
|
|
|
@ -95,9 +95,7 @@ public class UserCacheManager extends CacheManager {
|
|||
|
||||
@Override
|
||||
protected void addInvalidationsFromEvent(InvalidationEvent event, Set<String> invalidations) {
|
||||
if (event instanceof UserCacheInvalidationEvent) {
|
||||
((UserCacheInvalidationEvent) event).addInvalidations(this, invalidations);
|
||||
}
|
||||
((UserCacheInvalidationEvent) event).addInvalidations(this, invalidations);
|
||||
}
|
||||
|
||||
public void invalidateRealmUsers(String realm, Set<String> invalidations) {
|
||||
|
|
|
@ -90,7 +90,7 @@ public class UserCacheSession implements UserCache {
|
|||
public void clear() {
|
||||
cache.clear();
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
cluster.notify(InfinispanUserCacheProviderFactory.USER_CLEAR_CACHE_EVENTS, new ClearCacheEvent(), true);
|
||||
cluster.notify(InfinispanUserCacheProviderFactory.USER_CLEAR_CACHE_EVENTS, new ClearCacheEvent(), true, ClusterProvider.DCNotify.ALL_DCS);
|
||||
}
|
||||
|
||||
public UserProvider getDelegate() {
|
||||
|
@ -129,7 +129,7 @@ public class UserCacheSession implements UserCache {
|
|||
cache.invalidateObject(invalidation);
|
||||
}
|
||||
|
||||
cache.sendInvalidationEvents(session, invalidationEvents);
|
||||
cache.sendInvalidationEvents(session, invalidationEvents, InfinispanUserCacheProviderFactory.USER_INVALIDATION_EVENTS);
|
||||
}
|
||||
|
||||
private KeycloakTransaction getTransaction() {
|
||||
|
|
|
@ -41,6 +41,7 @@ public class InfinispanCacheStoreFactoryProviderFactory implements CachedStorePr
|
|||
|
||||
private static final Logger log = Logger.getLogger(InfinispanCacheStoreFactoryProviderFactory.class);
|
||||
public static final String AUTHORIZATION_CLEAR_CACHE_EVENTS = "AUTHORIZATION_CLEAR_CACHE_EVENTS";
|
||||
public static final String AUTHORIZATION_INVALIDATION_EVENTS = "AUTHORIZATION_INVALIDATION_EVENTS";
|
||||
|
||||
protected volatile StoreFactoryCacheManager storeCache;
|
||||
|
||||
|
@ -59,11 +60,11 @@ public class InfinispanCacheStoreFactoryProviderFactory implements CachedStorePr
|
|||
storeCache = new StoreFactoryCacheManager(cache, revisions);
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
|
||||
cluster.registerListener(ClusterProvider.ALL, (ClusterEvent event) -> {
|
||||
if (event instanceof InvalidationEvent) {
|
||||
InvalidationEvent invalidationEvent = (InvalidationEvent) event;
|
||||
storeCache.invalidationEventReceived(invalidationEvent);
|
||||
}
|
||||
cluster.registerListener(AUTHORIZATION_INVALIDATION_EVENTS, (ClusterEvent event) -> {
|
||||
|
||||
InvalidationEvent invalidationEvent = (InvalidationEvent) event;
|
||||
storeCache.invalidationEventReceived(invalidationEvent);
|
||||
|
||||
});
|
||||
|
||||
cluster.registerListener(AUTHORIZATION_CLEAR_CACHE_EVENTS, (ClusterEvent event) -> storeCache.clear());
|
||||
|
|
|
@ -216,7 +216,7 @@ public class StoreFactoryCacheSession implements CachedStoreFactoryProvider {
|
|||
cache.invalidateObject(id);
|
||||
}
|
||||
|
||||
cache.sendInvalidationEvents(session, invalidationEvents);
|
||||
cache.sendInvalidationEvents(session, invalidationEvents, InfinispanCacheStoreFactoryProviderFactory.AUTHORIZATION_INVALIDATION_EVENTS);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -22,13 +22,17 @@ import java.util.HashMap;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.keycloak.models.AuthenticatedClientSessionModel;
|
||||
import org.keycloak.models.ClientModel;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.UserSessionModel;
|
||||
import org.keycloak.models.sessions.infinispan.changes.InfinispanChangelogBasedTransaction;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
|
||||
import org.keycloak.models.sessions.infinispan.changes.UserSessionClientSessionUpdateTask;
|
||||
import org.keycloak.models.sessions.infinispan.changes.UserSessionUpdateTask;
|
||||
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
/**
|
||||
|
@ -39,19 +43,20 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
|
|||
private final AuthenticatedClientSessionEntity entity;
|
||||
private final ClientModel client;
|
||||
private final InfinispanUserSessionProvider provider;
|
||||
private final Cache<String, SessionEntity> cache;
|
||||
private final InfinispanChangelogBasedTransaction updateTx;
|
||||
private UserSessionAdapter userSession;
|
||||
|
||||
public AuthenticatedClientSessionAdapter(AuthenticatedClientSessionEntity entity, ClientModel client, UserSessionAdapter userSession, InfinispanUserSessionProvider provider, Cache<String, SessionEntity> cache) {
|
||||
public AuthenticatedClientSessionAdapter(AuthenticatedClientSessionEntity entity, ClientModel client, UserSessionAdapter userSession,
|
||||
InfinispanUserSessionProvider provider, InfinispanChangelogBasedTransaction updateTx) {
|
||||
this.provider = provider;
|
||||
this.entity = entity;
|
||||
this.client = client;
|
||||
this.cache = cache;
|
||||
this.updateTx = updateTx;
|
||||
this.userSession = userSession;
|
||||
}
|
||||
|
||||
private void update() {
|
||||
provider.getTx().replace(cache, userSession.getEntity().getId(), userSession.getEntity());
|
||||
private void update(UserSessionUpdateTask task) {
|
||||
updateTx.addTask(userSession.getId(), task);
|
||||
}
|
||||
|
||||
|
||||
|
@ -62,15 +67,27 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
|
|||
|
||||
// Dettach userSession
|
||||
if (userSession == null) {
|
||||
if (sessionEntity.getAuthenticatedClientSessions() != null) {
|
||||
sessionEntity.getAuthenticatedClientSessions().remove(clientUUID);
|
||||
update();
|
||||
this.userSession = null;
|
||||
}
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity sessionEntity) {
|
||||
sessionEntity.getAuthenticatedClientSessions().remove(clientUUID);
|
||||
}
|
||||
|
||||
};
|
||||
update(task);
|
||||
this.userSession = null;
|
||||
} else {
|
||||
this.userSession = (UserSessionAdapter) userSession;
|
||||
sessionEntity.getAuthenticatedClientSessions().put(clientUUID, entity);
|
||||
update();
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity sessionEntity) {
|
||||
sessionEntity.getAuthenticatedClientSessions().put(clientUUID, entity);
|
||||
}
|
||||
|
||||
};
|
||||
update(task);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -86,8 +103,16 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
|
|||
|
||||
@Override
|
||||
public void setRedirectUri(String uri) {
|
||||
entity.setRedirectUri(uri);
|
||||
update();
|
||||
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {
|
||||
|
||||
@Override
|
||||
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
|
||||
entity.setRedirectUri(uri);
}

};

update(task);
}

@Override

@@ -112,8 +137,22 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
@Override
public void setTimestamp(int timestamp) {
entity.setTimestamp(timestamp);
update();
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.setTimestamp(timestamp);
}

@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
// We usually update lastSessionRefresh at the same time. That would handle it.
return CrossDCMessageStatus.NOT_NEEDED;
}

};

update(task);
}

@Override

@@ -123,8 +162,16 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
@Override
public void setAction(String action) {
entity.setAction(action);
update();
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.setAction(action);
}

};

update(task);
}

@Override

@@ -134,8 +181,16 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
@Override
public void setProtocol(String method) {
entity.setAuthMethod(method);
update();
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.setAuthMethod(method);
}

};

update(task);
}

@Override

@@ -145,8 +200,16 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
@Override
public void setRoles(Set<String> roles) {
entity.setRoles(roles);
update();
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.setRoles(roles); // TODO not thread-safe. But we will remove setRoles anyway...?
}

};

update(task);
}

@Override

@@ -156,35 +219,54 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
@Override
public void setProtocolMappers(Set<String> protocolMappers) {
entity.setProtocolMappers(protocolMappers);
update();
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.setProtocolMappers(protocolMappers); // TODO not thread-safe. But we will remove setProtocolMappers anyway...?
}

};

update(task);
}

@Override
public String getNote(String name) {
return entity.getNotes()==null ? null : entity.getNotes().get(name);
return entity.getNotes().get(name);
}

@Override
public void setNote(String name, String value) {
if (entity.getNotes() == null) {
entity.setNotes(new HashMap<>());
}
entity.getNotes().put(name, value);
update();
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.getNotes().put(name, value);
}

};

update(task);
}

@Override
public void removeNote(String name) {
if (entity.getNotes() != null) {
entity.getNotes().remove(name);
update();
}
UserSessionClientSessionUpdateTask task = new UserSessionClientSessionUpdateTask(client.getId()) {

@Override
protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
entity.getNotes().remove(name);
}

};

update(task);
}

@Override
public Map<String, String> getNotes() {
if (entity.getNotes() == null || entity.getNotes().isEmpty()) return Collections.emptyMap();
if (entity.getNotes().isEmpty()) return Collections.emptyMap();
Map<String, String> copy = new HashMap<>();
copy.putAll(entity.getNotes());
return copy;
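The setters above all follow the same conversion: instead of mutating the entity and writing the whole session back, each setter enqueues a small update task that is replayed against the latest cached entity. Below is a minimal, stand-alone sketch of that idea; `ChangelogTx` and `ClientSessionEntityStub` are hypothetical stand-ins for illustration only, not the Keycloak classes used in the diff.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Hypothetical changelog-style transaction: setters record tasks,
// tasks are replayed on the current entity state at commit time.
class ChangelogTx<E> {
    private final List<Consumer<E>> tasks = new ArrayList<>();

    void addTask(Consumer<E> task) {
        tasks.add(task);
    }

    void commit(E latestEntity) {
        tasks.forEach(task -> task.accept(latestEntity));
    }
}

class ClientSessionEntityStub {
    private int timestamp;
    void setTimestamp(int timestamp) { this.timestamp = timestamp; }
    int getTimestamp() { return timestamp; }
}

public class UpdateTaskSketch {
    public static void main(String[] args) {
        ChangelogTx<ClientSessionEntityStub> tx = new ChangelogTx<>();

        int newTimestamp = 12345;
        // Analogous to setTimestamp(...) above: record the change as a task.
        tx.addTask(entity -> entity.setTimestamp(newTimestamp));

        ClientSessionEntityStub latest = new ClientSessionEntityStub();
        tx.commit(latest);
        System.out.println(latest.getTimestamp()); // prints 12345
    }
}
```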
@@ -15,27 +15,24 @@
* limitations under the License.
*/

package org.keycloak.models.sessions.infinispan.mapreduce;
package org.keycloak.models.sessions.infinispan;

import org.infinispan.distexec.mapreduce.Reducer;

import java.util.Iterator;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.context.Flag;

/**
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class LargestResultReducer implements Reducer<String, Integer> {
public class CacheDecorators {

@Override
public Integer reduce(String reducedKey, Iterator<Integer> itr) {
Integer largest = itr.next();
while (itr.hasNext()) {
Integer next = itr.next();
if (next > largest) {
largest = next;
}
}
return largest;
public static <K, V> AdvancedCache<K, V> localCache(Cache<K, V> cache) {
return cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL);
}

public static <K, V> AdvancedCache<K, V> skipCacheLoaders(Cache<K, V> cache) {
return cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE);
}

}
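The new `CacheDecorators` helpers are used throughout the rest of this commit to keep stream iteration node-local and to avoid touching the remote store. A minimal usage sketch (the two static methods and their package come from the class above; the wrapper class and method names here are only illustrative):

```java
import org.infinispan.Cache;
import org.keycloak.models.sessions.infinispan.CacheDecorators;

public class CacheDecoratorsUsage {

    // Count only entries owned by this node, as the expiration sweeps do.
    public static <K, V> int countLocalEntries(Cache<K, V> cache) {
        return CacheDecorators.localCache(cache).size();
    }

    // Remove an entry without going through configured cache loaders/stores.
    public static <K, V> void removeWithoutStore(Cache<K, V> cache, K key) {
        CacheDecorators.skipCacheLoaders(cache).remove(key);
    }
}
```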
@@ -61,10 +61,6 @@ public class InfinispanActionTokenStoreProvider implements ActionTokenStoreProvi
this.tx.put(actionKeyCache, tokenKey, tokenValue, key.getExpiration() - Time.currentTime(), TimeUnit.SECONDS);
}

private static String generateActionTokenEventId() {
return InfinispanActionTokenStoreProviderFactory.ACTION_TOKEN_EVENTS + "/" + UUID.randomUUID();
}

@Override
public ActionTokenValueModel get(ActionTokenKeyModel actionTokenKey) {
if (actionTokenKey == null || actionTokenKey.getUserId() == null || actionTokenKey.getActionId() == null) {

@@ -98,6 +94,6 @@ public class InfinispanActionTokenStoreProvider implements ActionTokenStoreProvi
}

ClusterProvider cluster = session.getProvider(ClusterProvider.class);
this.tx.notify(cluster, generateActionTokenEventId(), new RemoveActionTokensSpecificEvent(userId, actionId), false);
this.tx.notify(cluster, InfinispanActionTokenStoreProviderFactory.ACTION_TOKEN_EVENTS, new RemoveActionTokensSpecificEvent(userId, actionId), false);
}
}
@@ -70,24 +70,24 @@ public class InfinispanActionTokenStoreProviderFactory implements ActionTokenSto
ClusterProvider cluster = session.getProvider(ClusterProvider.class);

cluster.registerListener(ClusterProvider.ALL, event -> {
if (event instanceof RemoveActionTokensSpecificEvent) {
RemoveActionTokensSpecificEvent e = (RemoveActionTokensSpecificEvent) event;
cluster.registerListener(ACTION_TOKEN_EVENTS, event -> {

LOG.debugf("[%s] Removing token invalidation for user+action: userId=%s, actionId=%s", cacheAddress, e.getUserId(), e.getActionId());
RemoveActionTokensSpecificEvent e = (RemoveActionTokensSpecificEvent) event;

AdvancedCache<ActionTokenReducedKey, ActionTokenValueEntity> localCache = cache
.getAdvancedCache()
.withFlags(Flag.CACHE_MODE_LOCAL, Flag.SKIP_CACHE_LOAD);
LOG.debugf("[%s] Removing token invalidation for user+action: userId=%s, actionId=%s", cacheAddress, e.getUserId(), e.getActionId());

List<ActionTokenReducedKey> toRemove = localCache
.keySet()
.stream()
.filter(k -> Objects.equals(k.getUserId(), e.getUserId()) && Objects.equals(k.getActionId(), e.getActionId()))
.collect(Collectors.toList());
AdvancedCache<ActionTokenReducedKey, ActionTokenValueEntity> localCache = cache
.getAdvancedCache()
.withFlags(Flag.CACHE_MODE_LOCAL, Flag.SKIP_CACHE_LOAD);

List<ActionTokenReducedKey> toRemove = localCache
.keySet()
.stream()
.filter(k -> Objects.equals(k.getUserId(), e.getUserId()) && Objects.equals(k.getActionId(), e.getActionId()))
.collect(Collectors.toList());

toRemove.forEach(localCache::remove);

toRemove.forEach(localCache::remove);
}
});

LOG.debugf("[%s] Registered cluster listeners", cacheAddress);
@@ -30,6 +30,9 @@ import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.cache.infinispan.events.AuthenticationSessionAuthNoteUpdateEvent;
import org.keycloak.models.sessions.infinispan.entities.AuthenticationSessionEntity;
import org.keycloak.models.sessions.infinispan.events.ClientRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.SessionEventsSenderTransaction;
import org.keycloak.models.sessions.infinispan.stream.AuthenticationSessionPredicate;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.RealmInfoUtil;

@@ -46,13 +49,17 @@ public class InfinispanAuthenticationSessionProvider implements AuthenticationSe
private final KeycloakSession session;
private final Cache<String, AuthenticationSessionEntity> cache;
protected final InfinispanKeycloakTransaction tx;
protected final SessionEventsSenderTransaction clusterEventsSenderTx;

public InfinispanAuthenticationSessionProvider(KeycloakSession session, Cache<String, AuthenticationSessionEntity> cache) {
this.session = session;
this.cache = cache;

this.tx = new InfinispanKeycloakTransaction();
this.clusterEventsSenderTx = new SessionEventsSenderTransaction(session);

session.getTransactionManager().enlistAfterCompletion(tx);
session.getTransactionManager().enlistAfterCompletion(clusterEventsSenderTx);
}

@Override

@@ -109,37 +116,61 @@ public class InfinispanAuthenticationSessionProvider implements AuthenticationSe

// Each cluster node cleanups just local sessions, which are those owned by himself (+ few more taking l1 cache into account)
Iterator<Map.Entry<String, AuthenticationSessionEntity>> itr = cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL)
.entrySet().stream().filter(AuthenticationSessionPredicate.create(realm.getId()).expired(expired)).iterator();
Iterator<Map.Entry<String, AuthenticationSessionEntity>> itr = CacheDecorators.localCache(cache)
.entrySet()
.stream()
.filter(AuthenticationSessionPredicate.create(realm.getId()).expired(expired))
.iterator();

int counter = 0;
while (itr.hasNext()) {
counter++;
AuthenticationSessionEntity entity = itr.next().getValue();
tx.remove(cache, entity.getId());
tx.remove(CacheDecorators.localCache(cache), entity.getId());
}

log.debugf("Removed %d expired user sessions for realm '%s'", counter, realm.getName());
log.debugf("Removed %d expired authentication sessions for realm '%s'", counter, realm.getName());
}

// TODO: Should likely listen to "RealmRemovedEvent" received from cluster and clean just local sessions

@Override
public void onRealmRemoved(RealmModel realm) {
Iterator<Map.Entry<String, AuthenticationSessionEntity>> itr = cache.entrySet().stream().filter(AuthenticationSessionPredicate.create(realm.getId())).iterator();
clusterEventsSenderTx.addEvent(InfinispanAuthenticationSessionProviderFactory.REALM_REMOVED_AUTHSESSION_EVENT, RealmRemovedSessionEvent.create(realm.getId()), true);
}

protected void onRealmRemovedEvent(String realmId) {
Iterator<Map.Entry<String, AuthenticationSessionEntity>> itr = CacheDecorators.localCache(cache)
.entrySet()
.stream()
.filter(AuthenticationSessionPredicate.create(realmId))
.iterator();

while (itr.hasNext()) {
cache.remove(itr.next().getKey());
CacheDecorators.localCache(cache)
.remove(itr.next().getKey());
}
}

// TODO: Should likely listen to "ClientRemovedEvent" received from cluster and clean just local sessions

@Override
public void onClientRemoved(RealmModel realm, ClientModel client) {
Iterator<Map.Entry<String, AuthenticationSessionEntity>> itr = cache.entrySet().stream().filter(AuthenticationSessionPredicate.create(realm.getId()).client(client.getId())).iterator();
clusterEventsSenderTx.addEvent(InfinispanAuthenticationSessionProviderFactory.CLIENT_REMOVED_AUTHSESSION_EVENT, ClientRemovedSessionEvent.create(realm.getId(), client.getId()), true);
}

protected void onClientRemovedEvent(String realmId, String clientUuid) {
Iterator<Map.Entry<String, AuthenticationSessionEntity>> itr = CacheDecorators.localCache(cache)
.entrySet()
.stream()
.filter(AuthenticationSessionPredicate.create(realmId).client(clientUuid))
.iterator();

while (itr.hasNext()) {
cache.remove(itr.next().getKey());
CacheDecorators.localCache(cache)
.remove(itr.next().getKey());
}
}

@Override
public void updateNonlocalSessionAuthNotes(String authSessionId, Map<String, String> authNotesFragment) {
if (authSessionId == null) {

@@ -150,7 +181,8 @@ public class InfinispanAuthenticationSessionProvider implements AuthenticationSe
cluster.notify(
InfinispanAuthenticationSessionProviderFactory.AUTHENTICATION_SESSION_EVENTS,
AuthenticationSessionAuthNoteUpdateEvent.create(authSessionId, authNotesFragment),
true
true,
ClusterProvider.DCNotify.ALL_BUT_LOCAL_DC
);
}

@@ -159,4 +191,7 @@ public class InfinispanAuthenticationSessionProvider implements AuthenticationSe
}

public Cache<String, AuthenticationSessionEntity> getCache() {
return cache;
}
}
@@ -26,6 +26,12 @@ import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.cache.infinispan.events.AuthenticationSessionAuthNoteUpdateEvent;
import org.keycloak.models.sessions.infinispan.entities.AuthenticationSessionEntity;
import org.keycloak.models.sessions.infinispan.events.AbstractAuthSessionClusterListener;
import org.keycloak.models.sessions.infinispan.events.ClientRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.utils.PostMigrationEvent;
import org.keycloak.provider.ProviderEvent;
import org.keycloak.provider.ProviderEventListener;
import org.keycloak.sessions.AuthenticationSessionProvider;
import org.keycloak.sessions.AuthenticationSessionProviderFactory;
import java.util.Map;

@@ -42,13 +48,59 @@ public class InfinispanAuthenticationSessionProviderFactory implements Authentic
private volatile Cache<String, AuthenticationSessionEntity> authSessionsCache;

public static final String PROVIDER_ID = "infinispan";

public static final String AUTHENTICATION_SESSION_EVENTS = "AUTHENTICATION_SESSION_EVENTS";

public static final String REALM_REMOVED_AUTHSESSION_EVENT = "REALM_REMOVED_EVENT_AUTHSESSIONS";

public static final String CLIENT_REMOVED_AUTHSESSION_EVENT = "CLIENT_REMOVED_SESSION_AUTHSESSIONS";

@Override
public void init(Config.Scope config) {

}

@Override
public void postInit(KeycloakSessionFactory factory) {
factory.register(new ProviderEventListener() {

@Override
public void onEvent(ProviderEvent event) {
if (event instanceof PostMigrationEvent) {
registerClusterListeners(((PostMigrationEvent) event).getSession());
}
}
});
}

protected void registerClusterListeners(KeycloakSession session) {
KeycloakSessionFactory sessionFactory = session.getKeycloakSessionFactory();
ClusterProvider cluster = session.getProvider(ClusterProvider.class);

cluster.registerListener(REALM_REMOVED_AUTHSESSION_EVENT, new AbstractAuthSessionClusterListener<RealmRemovedSessionEvent>(sessionFactory) {

@Override
protected void eventReceived(KeycloakSession session, InfinispanAuthenticationSessionProvider provider, RealmRemovedSessionEvent sessionEvent) {
provider.onRealmRemovedEvent(sessionEvent.getRealmId());
}

});

cluster.registerListener(CLIENT_REMOVED_AUTHSESSION_EVENT, new AbstractAuthSessionClusterListener<ClientRemovedSessionEvent>(sessionFactory) {

@Override
protected void eventReceived(KeycloakSession session, InfinispanAuthenticationSessionProvider provider, ClientRemovedSessionEvent sessionEvent) {
provider.onClientRemovedEvent(sessionEvent.getRealmId(), sessionEvent.getClientUuid());
}
});

log.debug("Registered cluster listeners");
}

@Override
public AuthenticationSessionProvider create(KeycloakSession session) {
lazyInit(session);

@@ -98,16 +150,12 @@ public class InfinispanAuthenticationSessionProviderFactory implements Authentic
}
}

@Override
public void postInit(KeycloakSessionFactory factory) {
}

@Override
public void close() {
}

@Override
public String getId() {
return "infinispan";
return PROVIDER_ID;
}
}
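The factory above wires realm- and client-removal handling to named cluster events: one node publishes an event, every node's registered listener reacts by purging only its locally owned sessions. The following is a stand-alone, in-memory sketch of that publish/react pattern; the event-bus class, listener signature, and payload type here are simplified stand-ins, not the Keycloak `ClusterProvider` API.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

// Hypothetical in-memory event bus: listeners are keyed by event name,
// publishing an event under that name triggers each registered listener.
public class ClusterListenerSketch {

    private final Map<String, List<Consumer<String>>> listeners = new HashMap<>();

    void registerListener(String eventName, Consumer<String> listener) {
        listeners.computeIfAbsent(eventName, k -> new ArrayList<>()).add(listener);
    }

    void notify(String eventName, String payload) {
        listeners.getOrDefault(eventName, Collections.<Consumer<String>>emptyList())
                 .forEach(listener -> listener.accept(payload));
    }

    public static void main(String[] args) {
        ClusterListenerSketch bus = new ClusterListenerSketch();
        // Mirrors REALM_REMOVED_EVENT_AUTHSESSIONS: the listener cleans local state only.
        bus.registerListener("REALM_REMOVED_EVENT_AUTHSESSIONS",
                realmId -> System.out.println("removing local auth sessions for realm " + realmId));
        bus.notify("REALM_REMOVED_EVENT_AUTHSESSIONS", "my-realm");
    }
}
```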
@@ -155,7 +155,7 @@ public class InfinispanKeycloakTransaction implements KeycloakTransaction {
theTaskKey = taskKey + "-" + (i++);
}

tasks.put(taskKey, () -> clusterProvider.notify(taskKey, event, ignoreSender));
tasks.put(taskKey, () -> clusterProvider.notify(taskKey, event, ignoreSender, ClusterProvider.DCNotify.ALL_DCS));
}

public <K, V> void remove(Cache<K, V> cache, K key) {

@@ -168,7 +168,7 @@ public class InfinispanKeycloakTransaction implements KeycloakTransaction {
// This is for possibility to lookup for session by id, which was created in this transaction
public <K, V> V get(Cache<K, V> cache, K key) {
Object taskKey = getTaskKey(cache, key);
CacheTask<V> current = tasks.get(taskKey);
CacheTask current = tasks.get(taskKey);
if (current != null) {
if (current instanceof CacheTaskWithValue) {
return ((CacheTaskWithValue<V>) current).getValue();

@@ -190,11 +190,11 @@ public class InfinispanKeycloakTransaction implements KeycloakTransaction {
}
}

public interface CacheTask<V> {
public interface CacheTask {
void execute();
}

public abstract class CacheTaskWithValue<V> implements CacheTask<V> {
public abstract class CacheTaskWithValue<V> implements CacheTask {
protected V value;

public CacheTaskWithValue(V value) {
@@ -21,6 +21,7 @@ import org.keycloak.Config;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
import org.keycloak.sessions.StickySessionEncoderProvider;
import org.keycloak.sessions.StickySessionEncoderProviderFactory;

@@ -29,16 +30,22 @@ import org.keycloak.sessions.StickySessionEncoderProviderFactory;
*/
public class InfinispanStickySessionEncoderProviderFactory implements StickySessionEncoderProviderFactory {

private String myNodeName;

@Override
public StickySessionEncoderProvider create(KeycloakSession session) {
String myNodeName = InfinispanUtil.getMyAddress(session);

if (myNodeName != null && myNodeName.startsWith(InfinispanConnectionProvider.NODE_PREFIX)) {

// Node name was randomly generated. We won't use anything for sticky sessions in this case
myNodeName = null;
}

return new InfinispanStickySessionEncoderProvider(session, myNodeName);
}

@Override
public void init(Config.Scope config) {
myNodeName = config.get("nodeName", System.getProperty(InfinispanConnectionProvider.JBOSS_NODE_NAME));
}

@Override
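The change above moves node-name resolution from the Infinispan address (which may be randomly generated and therefore useless for sticky routing) to a configured value falling back to the JBoss node name. A small hedged sketch of that decision logic; the prefix value and method names here are assumptions for illustration, not the real constants.

```java
public class NodeNameSketch {

    // Assumption: generated names carry a recognizable prefix; the real constant differs.
    static final String GENERATED_NODE_PREFIX = "node_";

    static String resolveNodeName(String configured, String jbossNodeName) {
        String name = configured != null ? configured : jbossNodeName;
        // A randomly generated name cannot be used for sticky-session routing, so drop it.
        if (name != null && name.startsWith(GENERATED_NODE_PREFIX)) {
            return null;
        }
        return name;
    }

    public static void main(String[] args) {
        System.out.println(resolveNodeName(null, "node_123456")); // null
        System.out.println(resolveNodeName("web-1", null));       // web-1
    }
}
```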
@@ -18,10 +18,11 @@
package org.keycloak.models.sessions.infinispan;

import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.context.Flag;
import org.jboss.logging.Logger;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.AuthenticatedClientSessionModel;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;

@@ -31,19 +32,27 @@ import org.keycloak.models.UserModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.UserSessionProvider;
import org.keycloak.models.session.UserSessionPersisterProvider;
import org.keycloak.models.sessions.infinispan.changes.sessions.LastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.InfinispanChangelogBasedTransaction;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.events.ClientRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveAllUserLoginFailuresEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveUserSessionsEvent;
import org.keycloak.models.sessions.infinispan.events.SessionEventsSenderTransaction;
import org.keycloak.models.sessions.infinispan.stream.Comparators;
import org.keycloak.models.sessions.infinispan.stream.Mappers;
import org.keycloak.models.sessions.infinispan.stream.SessionPredicate;
import org.keycloak.models.sessions.infinispan.stream.UserLoginFailurePredicate;
import org.keycloak.models.sessions.infinispan.stream.UserSessionPredicate;
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;

import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

@@ -51,7 +60,6 @@ import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**

@@ -62,31 +70,71 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
private static final Logger log = Logger.getLogger(InfinispanUserSessionProvider.class);

protected final KeycloakSession session;
protected final Cache<String, SessionEntity> sessionCache;
protected final Cache<String, SessionEntity> offlineSessionCache;

protected final Cache<String, SessionEntityWrapper<UserSessionEntity>> sessionCache;
protected final Cache<String, SessionEntityWrapper<UserSessionEntity>> offlineSessionCache;
protected final Cache<LoginFailureKey, LoginFailureEntity> loginFailureCache;

protected final InfinispanChangelogBasedTransaction<UserSessionEntity> sessionTx;
protected final InfinispanChangelogBasedTransaction<UserSessionEntity> offlineSessionTx;
protected final InfinispanKeycloakTransaction tx;

public InfinispanUserSessionProvider(KeycloakSession session, Cache<String, SessionEntity> sessionCache, Cache<String, SessionEntity> offlineSessionCache,
protected final SessionEventsSenderTransaction clusterEventsSenderTx;

protected final LastSessionRefreshStore lastSessionRefreshStore;
protected final LastSessionRefreshStore offlineLastSessionRefreshStore;

public InfinispanUserSessionProvider(KeycloakSession session,
RemoteCacheInvoker remoteCacheInvoker,
LastSessionRefreshStore lastSessionRefreshStore,
LastSessionRefreshStore offlineLastSessionRefreshStore,
Cache<String, SessionEntityWrapper<UserSessionEntity>> sessionCache,
Cache<String, SessionEntityWrapper<UserSessionEntity>> offlineSessionCache,
Cache<LoginFailureKey, LoginFailureEntity> loginFailureCache) {
this.session = session;

this.sessionCache = sessionCache;
this.offlineSessionCache = offlineSessionCache;
this.loginFailureCache = loginFailureCache;

this.sessionTx = new InfinispanChangelogBasedTransaction<>(session, InfinispanConnectionProvider.SESSION_CACHE_NAME, sessionCache, remoteCacheInvoker);
this.offlineSessionTx = new InfinispanChangelogBasedTransaction<>(session, InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME, offlineSessionCache, remoteCacheInvoker);

this.tx = new InfinispanKeycloakTransaction();

this.clusterEventsSenderTx = new SessionEventsSenderTransaction(session);

this.lastSessionRefreshStore = lastSessionRefreshStore;
this.offlineLastSessionRefreshStore = offlineLastSessionRefreshStore;

session.getTransactionManager().enlistAfterCompletion(tx);
session.getTransactionManager().enlistAfterCompletion(clusterEventsSenderTx);
session.getTransactionManager().enlistAfterCompletion(sessionTx);
session.getTransactionManager().enlistAfterCompletion(offlineSessionTx);
}

protected Cache<String, SessionEntity> getCache(boolean offline) {
protected Cache<String, SessionEntityWrapper<UserSessionEntity>> getCache(boolean offline) {
return offline ? offlineSessionCache : sessionCache;
}

protected InfinispanChangelogBasedTransaction<UserSessionEntity> getTransaction(boolean offline) {
return offline ? offlineSessionTx : sessionTx;
}

protected LastSessionRefreshStore getLastSessionRefreshStore() {
return lastSessionRefreshStore;
}

protected LastSessionRefreshStore getOfflineLastSessionRefreshStore() {
return offlineLastSessionRefreshStore;
}

@Override
public AuthenticatedClientSessionModel createClientSession(RealmModel realm, ClientModel client, UserSessionModel userSession) {
AuthenticatedClientSessionEntity entity = new AuthenticatedClientSessionEntity();

AuthenticatedClientSessionAdapter adapter = new AuthenticatedClientSessionAdapter(entity, client, (UserSessionAdapter) userSession, this, sessionCache);
InfinispanChangelogBasedTransaction<UserSessionEntity> updateTx = getTransaction(false);
AuthenticatedClientSessionAdapter adapter = new AuthenticatedClientSessionAdapter(entity, client, (UserSessionAdapter) userSession, this, updateTx);
adapter.setUserSession(userSession);
return adapter;
}

@@ -95,10 +143,28 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
public UserSessionModel createUserSession(String id, RealmModel realm, UserModel user, String loginUsername, String ipAddress, String authMethod, boolean rememberMe, String brokerSessionId, String brokerUserId) {
UserSessionEntity entity = new UserSessionEntity();
entity.setId(id);

updateSessionEntity(entity, realm, user, loginUsername, ipAddress, authMethod, rememberMe, brokerSessionId, brokerUserId);

tx.putIfAbsent(sessionCache, id, entity);
SessionUpdateTask<UserSessionEntity> createSessionTask = new SessionUpdateTask<UserSessionEntity>() {

@Override
public void runUpdate(UserSessionEntity session) {

}

@Override
public CacheOperation getOperation(UserSessionEntity session) {
return CacheOperation.ADD_IF_ABSENT;
}

@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}

};

sessionTx.addTask(id, createSessionTask, entity);

return wrap(realm, entity, false);
}

@@ -121,31 +187,43 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
}

@Override
public UserSessionModel getUserSession(RealmModel realm, String id) {
return getUserSession(realm, id, false);
}

protected UserSessionAdapter getUserSession(RealmModel realm, String id, boolean offline) {
Cache<String, SessionEntity> cache = getCache(offline);
UserSessionEntity entity = (UserSessionEntity) tx.get(cache, id); // Chance created in this transaction

if (entity == null) {
entity = (UserSessionEntity) cache.get(id);
}

UserSessionEntity entity = getUserSessionEntity(id, offline);
return wrap(realm, entity, offline);
}

protected List<UserSessionModel> getUserSessions(RealmModel realm, Predicate<Map.Entry<String, SessionEntity>> predicate, boolean offline) {
CacheStream<Map.Entry<String, SessionEntity>> cacheStream = getCache(offline).entrySet().stream();
Iterator<Map.Entry<String, SessionEntity>> itr = cacheStream.filter(predicate).iterator();
List<UserSessionModel> sessions = new LinkedList<>();
private UserSessionEntity getUserSessionEntity(String id, boolean offline) {
InfinispanChangelogBasedTransaction<UserSessionEntity> tx = getTransaction(offline);
SessionEntityWrapper<UserSessionEntity> entityWrapper = tx.get(id);
return entityWrapper==null ? null : entityWrapper.getEntity();
}

protected List<UserSessionModel> getUserSessions(RealmModel realm, Predicate<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>> predicate, boolean offline) {
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);

cache = CacheDecorators.skipCacheLoaders(cache);

Stream<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>> cacheStream = cache.entrySet().stream();

List<UserSessionModel> resultSessions = new LinkedList<>();

Iterator<UserSessionEntity> itr = cacheStream.filter(predicate)
.map(Mappers.userSessionEntity())
.iterator();

while (itr.hasNext()) {
UserSessionEntity e = (UserSessionEntity) itr.next().getValue();
sessions.add(wrap(realm, e, offline));
UserSessionEntity userSessionEntity = itr.next();
resultSessions.add(wrap(realm, userSessionEntity, offline));
}
return sessions;

return resultSessions;
}

@Override

@@ -175,65 +253,90 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
}

protected List<UserSessionModel> getUserSessions(final RealmModel realm, ClientModel client, int firstResult, int maxResults, final boolean offline) {
final Cache<String, SessionEntity> cache = getCache(offline);
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);

cache = CacheDecorators.skipCacheLoaders(cache);

Stream<UserSessionEntity> stream = cache.entrySet().stream()
.filter(UserSessionPredicate.create(realm.getId()).client(client.getId()))
.map(Mappers.userSessionEntity())
.sorted(Comparators.userSessionLastSessionRefresh());

// Doesn't work due to ISPN-6575 . TODO Fix once infinispan upgraded to 8.2.2.Final or 9.0
// if (firstResult > 0) {
// stream = stream.skip(firstResult);
// }
//
// if (maxResults > 0) {
// stream = stream.limit(maxResults);
// }
//
// List<UserSessionEntity> entities = stream.collect(Collectors.toList());

// Workaround for ISPN-6575 TODO Fix once infinispan upgraded to 8.2.2.Final or 9.0 and replace with the more effective code above
if (firstResult < 0) {
firstResult = 0;
}
if (maxResults < 0) {
maxResults = Integer.MAX_VALUE;
if (firstResult > 0) {
stream = stream.skip(firstResult);
}

int count = firstResult + maxResults;
if (count > 0) {
stream = stream.limit(count);
if (maxResults > 0) {
stream = stream.limit(maxResults);
}
List<UserSessionEntity> entities = stream.collect(Collectors.toList());

if (firstResult > entities.size()) {
return Collections.emptyList();
}

maxResults = Math.min(maxResults, entities.size() - firstResult);
entities = entities.subList(firstResult, firstResult + maxResults);

final List<UserSessionModel> sessions = new LinkedList<>();
entities.stream().forEach(new Consumer<UserSessionEntity>() {
@Override
public void accept(UserSessionEntity userSessionEntity) {
sessions.add(wrap(realm, userSessionEntity, offline));
}
});
Iterator<UserSessionEntity> itr = stream.iterator();

while (itr.hasNext()) {
UserSessionEntity userSessionEntity = itr.next();
sessions.add(wrap(realm, userSessionEntity, offline));
}

return sessions;
}

@Override
public UserSessionModel getUserSessionWithPredicate(RealmModel realm, String id, boolean offline, Predicate<UserSessionModel> predicate) {
UserSessionModel userSession = getUserSession(realm, id, offline);
if (userSession == null) {
return null;
}

// We have userSession, which passes predicate. No need for remote lookup.
if (predicate.test(userSession)) {
return userSession;
}

// Try lookup userSession from remoteCache
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache);

if (remoteCache != null) {
UserSessionEntity remoteSessionEntity = (UserSessionEntity) remoteCache.get(id);
if (remoteSessionEntity != null) {

UserSessionModel remoteSessionAdapter = wrap(realm, remoteSessionEntity, offline);
if (predicate.test(remoteSessionAdapter)) {

InfinispanChangelogBasedTransaction<UserSessionEntity> tx = getTransaction(offline);

// Remote entity contains our predicate. Update local cache with the remote entity
SessionEntityWrapper<UserSessionEntity> sessionWrapper = remoteSessionEntity.mergeRemoteEntityWithLocalEntity(tx.get(id));

// Replace entity just in ispn cache. Skip remoteStore
cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
.replace(id, sessionWrapper);

tx.reloadEntityInCurrentTransaction(realm, id, sessionWrapper);

// Recursion. We should have it locally now
return getUserSessionWithPredicate(realm, id, offline, predicate);
}
}
}

return null;
}

@Override
public long getActiveUserSessions(RealmModel realm, ClientModel client) {
return getUserSessionsCount(realm, client, false);
}

protected long getUserSessionsCount(RealmModel realm, ClientModel client, boolean offline) {
return getCache(offline).entrySet().stream()
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
cache = CacheDecorators.skipCacheLoaders(cache);

return cache.entrySet().stream()
.filter(UserSessionPredicate.create(realm.getId()).client(client.getId()))
.count();
}

@@ -242,7 +345,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
public void removeUserSession(RealmModel realm, UserSessionModel session) {
UserSessionEntity entity = getUserSessionEntity(session, false);
if (entity != null) {
removeUserSession(realm, entity, false);
removeUserSession(entity, false);
}
}

@@ -252,12 +355,15 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
}

protected void removeUserSessions(RealmModel realm, UserModel user, boolean offline) {
Cache<String, SessionEntity> cache = getCache(offline);
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);

cache = CacheDecorators.skipCacheLoaders(cache);

Iterator<UserSessionEntity> itr = cache.entrySet().stream().filter(UserSessionPredicate.create(realm.getId()).user(user.getId())).map(Mappers.userSessionEntity()).iterator();

Iterator<SessionEntity> itr = cache.entrySet().stream().filter(UserSessionPredicate.create(realm.getId()).user(user.getId())).map(Mappers.sessionEntity()).iterator();
while (itr.hasNext()) {
UserSessionEntity userSessionEntity = (UserSessionEntity) itr.next();
removeUserSession(realm, userSessionEntity, offline);
UserSessionEntity userSessionEntity = itr.next();
removeUserSession(userSessionEntity, offline);
}
}

@@ -273,17 +379,30 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
int expiredRefresh = Time.currentTime() - realm.getSsoSessionIdleTimeout();

// Each cluster node cleanups just local sessions, which are those owned by himself (+ few more taking l1 cache into account)
Iterator<Map.Entry<String, SessionEntity>> itr = sessionCache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL)
.entrySet().stream().filter(UserSessionPredicate.create(realm.getId()).expired(expired, expiredRefresh)).iterator();
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCache = CacheDecorators.localCache(sessionCache);

int counter = 0;
while (itr.hasNext()) {
counter++;
UserSessionEntity entity = (UserSessionEntity) itr.next().getValue();
tx.remove(sessionCache, entity.getId());
}
int[] counter = { 0 };

log.debugf("Removed %d expired user sessions for realm '%s'", counter, realm.getName());
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoaders(localCache);

// Ignore remoteStore for stream iteration. But we will invoke remoteStore for userSession removal propagate
localCacheStoreIgnore
.entrySet()
.stream()
.filter(UserSessionPredicate.create(realm.getId()).expired(expired, expiredRefresh))
.map(Mappers.sessionId())
.forEach(new Consumer<String>() {

@Override
public void accept(String sessionId) {
counter[0]++;
tx.remove(localCache, sessionId);
}

});

log.debugf("Removed %d expired user sessions for realm '%s'", counter[0], realm.getName());
}

private void removeExpiredOfflineUserSessions(RealmModel realm) {

@@ -291,38 +410,69 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
int expiredOffline = Time.currentTime() - realm.getOfflineSessionIdleTimeout();

// Each cluster node cleanups just local sessions, which are those owned by himself (+ few more taking l1 cache into account)
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCache = CacheDecorators.localCache(offlineSessionCache);

UserSessionPredicate predicate = UserSessionPredicate.create(realm.getId()).expired(null, expiredOffline);
Iterator<Map.Entry<String, SessionEntity>> itr = offlineSessionCache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL)
.entrySet().stream().filter(predicate).iterator();

int counter = 0;
while (itr.hasNext()) {
counter++;
UserSessionEntity entity = (UserSessionEntity) itr.next().getValue();
tx.remove(offlineSessionCache, entity.getId());
final int[] counter = { 0 };

persister.removeUserSession(entity.getId(), true);
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoaders(localCache);

for (String clientUUID : entity.getAuthenticatedClientSessions().keySet()) {
persister.removeClientSession(entity.getId(), clientUUID, true);
}
}
// Ignore remoteStore for stream iteration. But we will invoke remoteStore for userSession removal propagate
localCacheStoreIgnore
.entrySet()
.stream()
.filter(predicate)
.map(Mappers.userSessionEntity())
.forEach(new Consumer<UserSessionEntity>() {

@Override
public void accept(UserSessionEntity userSessionEntity) {
counter[0]++;
tx.remove(localCache, userSessionEntity.getId());

// TODO:mposolda can be likely optimized to delete all expired at one step
persister.removeUserSession( userSessionEntity.getId(), true);

// TODO can be likely optimized to delete all at one step
for (String clientUUID : userSessionEntity.getAuthenticatedClientSessions().keySet()) {
persister.removeClientSession(userSessionEntity.getId(), clientUUID, true);
}
}
});

log.debugf("Removed %d expired offline user sessions for realm '%s'", counter, realm.getName());
}

@Override
public void removeUserSessions(RealmModel realm) {
removeUserSessions(realm, false);
// Don't send message to all DCs, just to all cluster nodes in current DC. The remoteCache will notify client listeners for removed userSessions. This assumes that 2nd DC contains same userSessions like current one.
clusterEventsSenderTx.addEvent(InfinispanUserSessionProviderFactory.REMOVE_USER_SESSIONS_EVENT, RemoveUserSessionsEvent.create(realm.getId()), false);
}

protected void removeUserSessions(RealmModel realm, boolean offline) {
Cache<String, SessionEntity> cache = getCache(offline);
protected void onRemoveUserSessionsEvent(String realmId) {
removeLocalUserSessions(realmId, false);
}

Iterator<String> itr = cache.entrySet().stream().filter(SessionPredicate.create(realm.getId())).map(Mappers.sessionId()).iterator();
while (itr.hasNext()) {
cache.remove(itr.next());
}
private void removeLocalUserSessions(String realmId, boolean offline) {
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCache = CacheDecorators.localCache(cache);

Cache<String, SessionEntityWrapper<UserSessionEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoaders(localCache);

localCacheStoreIgnore
.entrySet()
.stream()
.filter(SessionPredicate.create(realmId))
.map(Mappers.sessionId())
.forEach(new Consumer<String>() {

@Override
public void accept(String sessionId) {
localCache.remove(sessionId);
}

});
}

@Override

@@ -348,22 +498,48 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
@Override
public void removeAllUserLoginFailures(RealmModel realm) {
Iterator<LoginFailureKey> itr = loginFailureCache.entrySet().stream().filter(UserLoginFailurePredicate.create(realm.getId())).map(Mappers.loginFailureId()).iterator();
clusterEventsSenderTx.addEvent(InfinispanUserSessionProviderFactory.REMOVE_ALL_LOGIN_FAILURES_EVENT, RemoveAllUserLoginFailuresEvent.create(realm.getId()), false);
}

protected void onRemoveAllUserLoginFailuresEvent(String realmId) {
removeAllLocalUserLoginFailuresEvent(realmId);
}

private void removeAllLocalUserLoginFailuresEvent(String realmId) {
Cache<LoginFailureKey, LoginFailureEntity> localCache = CacheDecorators.localCache(loginFailureCache);

Cache<LoginFailureKey, LoginFailureEntity> localCacheStoreIgnore = CacheDecorators.skipCacheLoaders(localCache);

Iterator<LoginFailureKey> itr = localCacheStoreIgnore
.entrySet()
.stream()
.filter(UserLoginFailurePredicate.create(realmId))
.map(Mappers.loginFailureId())
.iterator();

while (itr.hasNext()) {
LoginFailureKey key = itr.next();
tx.remove(loginFailureCache, key);
localCache.remove(key);
}
}

@Override
public void onRealmRemoved(RealmModel realm) {
removeUserSessions(realm, true);
removeUserSessions(realm, false);
removeAllUserLoginFailures(realm);
clusterEventsSenderTx.addEvent(InfinispanUserSessionProviderFactory.REALM_REMOVED_SESSION_EVENT, RealmRemovedSessionEvent.create(realm.getId()), false);
}

protected void onRealmRemovedEvent(String realmId) {
removeLocalUserSessions(realmId, true);
removeLocalUserSessions(realmId, false);
removeAllLocalUserLoginFailuresEvent(realmId);
}

@Override
public void onClientRemoved(RealmModel realm, ClientModel client) {
clusterEventsSenderTx.addEvent(InfinispanUserSessionProviderFactory.CLIENT_REMOVED_SESSION_EVENT, ClientRemovedSessionEvent.create(realm.getId(), client.getId()), false);
}

protected void onClientRemovedEvent(String realmId, String clientUuid) {
// Nothing for now. userSession.getAuthenticatedClientSessions() will check lazily if particular client exists and update userSession on-the-fly.
}

@@ -380,10 +556,29 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
public void close() {
}

protected void removeUserSession(RealmModel realm, UserSessionEntity sessionEntity, boolean offline) {
Cache<String, SessionEntity> cache = getCache(offline);
protected void removeUserSession(UserSessionEntity sessionEntity, boolean offline) {
InfinispanChangelogBasedTransaction<UserSessionEntity> tx = getTransaction(offline);

tx.remove(cache, sessionEntity.getId());
SessionUpdateTask<UserSessionEntity> removeTask = new SessionUpdateTask<UserSessionEntity>() {

@Override
public void runUpdate(UserSessionEntity entity) {

}

@Override
public CacheOperation getOperation(UserSessionEntity entity) {
return CacheOperation.REMOVE;
}

@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}

};

tx.addTask(sessionEntity.getId(), removeTask);
}

InfinispanKeycloakTransaction getTx() {

@@ -391,16 +586,8 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
}

UserSessionAdapter wrap(RealmModel realm, UserSessionEntity entity, boolean offline) {
Cache<String, SessionEntity> cache = getCache(offline);
return entity != null ? new UserSessionAdapter(session, this, cache, realm, entity, offline) : null;
}

List<UserSessionModel> wrapUserSessions(RealmModel realm, Collection<UserSessionEntity> entities, boolean offline) {
List<UserSessionModel> models = new LinkedList<>();
for (UserSessionEntity e : entities) {
models.add(wrap(realm, e, offline));
}
return models;
InfinispanChangelogBasedTransaction<UserSessionEntity> tx = getTransaction(offline);
return entity != null ? new UserSessionAdapter(session, this, tx, realm, entity, offline) : null;
}

UserLoginFailureModel wrap(LoginFailureKey key, LoginFailureEntity entity) {

@@ -411,8 +598,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
if (userSession instanceof UserSessionAdapter) {
return ((UserSessionAdapter) userSession).getEntity();
} else {
Cache<String, SessionEntity> cache = getCache(offline);
return cache != null ? (UserSessionEntity) cache.get(userSession.getId()) : null;
return getUserSessionEntity(userSession.getId(), offline);
}
}

@@ -438,7 +624,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
public void removeOfflineUserSession(RealmModel realm, UserSessionModel userSession) {
UserSessionEntity userSessionEntity = getUserSessionEntity(userSession, true);
if (userSessionEntity != null) {
removeUserSession(realm, userSessionEntity, true);
removeUserSession(userSessionEntity, true);
}
}

@@ -449,7 +635,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
UserSessionAdapter userSessionAdapter = (offlineUserSession instanceof UserSessionAdapter) ? (UserSessionAdapter) offlineUserSession :
getOfflineUserSession(offlineUserSession.getRealm(), offlineUserSession.getId());

AuthenticatedClientSessionAdapter offlineClientSession = importClientSession(userSessionAdapter, clientSession);
AuthenticatedClientSessionAdapter offlineClientSession = importClientSession(userSessionAdapter, clientSession, getTransaction(true));

// update timestamp to current time
offlineClientSession.setTimestamp(Time.currentTime());

@@ -459,12 +645,18 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
@Override
public List<UserSessionModel> getOfflineUserSessions(RealmModel realm, UserModel user) {
Iterator<Map.Entry<String, SessionEntity>> itr = offlineSessionCache.entrySet().stream().filter(UserSessionPredicate.create(realm.getId()).user(user.getId())).iterator();
List<UserSessionModel> userSessions = new LinkedList<>();

while(itr.hasNext()) {
UserSessionEntity entity = (UserSessionEntity) itr.next().getValue();
UserSessionModel userSession = wrap(realm, entity, true);
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = CacheDecorators.skipCacheLoaders(offlineSessionCache);

Iterator<UserSessionEntity> itr = cache.entrySet().stream()
.filter(UserSessionPredicate.create(realm.getId()).user(user.getId()))
.map(Mappers.userSessionEntity())
.iterator();

while (itr.hasNext()) {
UserSessionEntity userSessionEntity = itr.next();
UserSessionModel userSession = wrap(realm, userSessionEntity, true);
userSessions.add(userSession);
}

@@ -492,7 +684,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
entity.setBrokerUserId(userSession.getBrokerUserId());
entity.setIpAddress(userSession.getIpAddress());
entity.setLoginUsername(userSession.getLoginUsername());
entity.setNotes(userSession.getNotes()== null ? new ConcurrentHashMap<>() : userSession.getNotes());
entity.setNotes(userSession.getNotes() == null ? new ConcurrentHashMap<>() : userSession.getNotes());
entity.setAuthenticatedClientSessions(new ConcurrentHashMap<>());
entity.setRememberMe(userSession.isRememberMe());
entity.setState(userSession.getState());

@@ -502,14 +694,34 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
entity.setLastSessionRefresh(userSession.getLastSessionRefresh());

Cache<String, SessionEntity> cache = getCache(offline);
tx.put(cache, userSession.getId(), entity);
InfinispanChangelogBasedTransaction<UserSessionEntity> tx = getTransaction(offline);

SessionUpdateTask importTask = new SessionUpdateTask<UserSessionEntity>() {

@Override
public void runUpdate(UserSessionEntity session) {

}

@Override
public CacheOperation getOperation(UserSessionEntity session) {
return CacheOperation.ADD_IF_ABSENT;
}

@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}

};
tx.addTask(userSession.getId(), importTask, entity);

UserSessionAdapter importedSession = wrap(userSession.getRealm(), entity, offline);

// Handle client sessions
if (importAuthenticatedClientSessions) {
for (AuthenticatedClientSessionModel clientSession : userSession.getAuthenticatedClientSessions().values()) {
importClientSession(importedSession, clientSession);
importClientSession(importedSession, clientSession, tx);
}
}

@@ -517,25 +729,46 @@ public class InfinispanUserSessionProvider implements UserSessionProvider {
}

private AuthenticatedClientSessionAdapter importClientSession(UserSessionAdapter importedUserSession, AuthenticatedClientSessionModel clientSession) {
private AuthenticatedClientSessionAdapter importClientSession(UserSessionAdapter importedUserSession, AuthenticatedClientSessionModel clientSession,
InfinispanChangelogBasedTransaction<UserSessionEntity> updateTx) {
AuthenticatedClientSessionEntity entity = new AuthenticatedClientSessionEntity();

entity.setAction(clientSession.getAction());
entity.setAuthMethod(clientSession.getProtocol());

entity.setNotes(clientSession.getNotes());
entity.setNotes(clientSession.getNotes() == null ? new ConcurrentHashMap<>() : clientSession.getNotes());
entity.setProtocolMappers(clientSession.getProtocolMappers());
entity.setRedirectUri(clientSession.getRedirectUri());
entity.setRoles(clientSession.getRoles());
entity.setTimestamp(clientSession.getTimestamp());

Map<String, AuthenticatedClientSessionEntity> clientSessions = importedUserSession.getEntity().getAuthenticatedClientSessions();

clientSessions.put(clientSession.getClient().getId(), entity);

importedUserSession.update();
SessionUpdateTask importTask = new SessionUpdateTask<UserSessionEntity>() {

return new AuthenticatedClientSessionAdapter(entity, clientSession.getClient(), importedUserSession, this, importedUserSession.getCache());
@Override
public void runUpdate(UserSessionEntity session) {
Map<String, AuthenticatedClientSessionEntity> clientSessions = session.getAuthenticatedClientSessions();
clientSessions.put(clientSession.getClient().getId(), entity);
}

@Override
public CacheOperation getOperation(UserSessionEntity session) {
return CacheOperation.REPLACE;
}

@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}

};
updateTx.addTask(importedUserSession.getId(), importTask);

return new AuthenticatedClientSessionAdapter(entity, clientSession.getClient(), importedUserSession, this, updateTx);
}

}
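Every anonymous task in the provider above answers the same three questions: how to mutate the cached entity, which cache operation to apply, and whether the change must be propagated to the other data center right away. A self-contained sketch of that anatomy follows; the enums and the `Task` interface are simplified stand-ins for the Keycloak types, not the real API.

```java
public class SessionUpdateTaskSketch {

    enum CacheOperation { ADD_IF_ABSENT, REPLACE, REMOVE }
    enum CrossDCMessageStatus { SYNC, NOT_NEEDED }

    interface Task<E> {
        void runUpdate(E entity);                 // how to mutate the cached entity
        CacheOperation getOperation(E entity);    // how to write it back to the cache
        CrossDCMessageStatus getCrossDCStatus();  // whether the other DC must be told immediately
    }

    static class UserSessionStub {
        int lastRefresh;
    }

    public static void main(String[] args) {
        // A "refresh"-style task: mutate locally, replace in the cache,
        // and skip an immediate cross-DC message (batched elsewhere).
        Task<UserSessionStub> refreshTask = new Task<UserSessionStub>() {
            public void runUpdate(UserSessionStub s) { s.lastRefresh = 1234567; }
            public CacheOperation getOperation(UserSessionStub s) { return CacheOperation.REPLACE; }
            public CrossDCMessageStatus getCrossDCStatus() { return CrossDCMessageStatus.NOT_NEEDED; }
        };

        UserSessionStub session = new UserSessionStub();
        refreshTask.runUpdate(session);
        System.out.println(session.lastRefresh + " " + refreshTask.getOperation(session)
                + " " + refreshTask.getCrossDCStatus());
    }
}
```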
@ -18,41 +18,76 @@
|
|||
package org.keycloak.models.sessions.infinispan;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.persistence.remote.RemoteStore;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.Config;
|
||||
import org.keycloak.cluster.ClusterProvider;
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.KeycloakSessionTask;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.UserModel;
|
||||
import org.keycloak.models.UserSessionProvider;
|
||||
import org.keycloak.models.UserSessionProviderFactory;
|
||||
import org.keycloak.models.sessions.infinispan.changes.sessions.LastSessionRefreshStore;
|
||||
import org.keycloak.models.sessions.infinispan.changes.sessions.LastSessionRefreshStoreFactory;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.BaseCacheInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.CacheInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.DBLockBasedCacheInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.SingleWorkerCacheInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.InfinispanUserSessionInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.OfflineUserSessionLoader;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.events.AbstractUserSessionClusterListener;
|
||||
import org.keycloak.models.sessions.infinispan.events.ClientRemovedSessionEvent;
|
||||
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
|
||||
import org.keycloak.models.sessions.infinispan.events.RemoveAllUserLoginFailuresEvent;
|
||||
import org.keycloak.models.sessions.infinispan.events.RemoveUserSessionsEvent;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.InfinispanCacheInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.OfflinePersistentUserSessionLoader;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionListener;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoader;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
import org.keycloak.models.utils.PostMigrationEvent;
|
||||
import org.keycloak.provider.ProviderEvent;
|
||||
import org.keycloak.provider.ProviderEventListener;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Set;
|
||||
|
||||
public class InfinispanUserSessionProviderFactory implements UserSessionProviderFactory {
|
||||
|
||||
private static final Logger log = Logger.getLogger(InfinispanUserSessionProviderFactory.class);
|
||||
|
||||
public static final String PROVIDER_ID = "infinispan";
|
||||
|
||||
public static final String REALM_REMOVED_SESSION_EVENT = "REALM_REMOVED_EVENT_SESSIONS";
|
||||
|
||||
public static final String CLIENT_REMOVED_SESSION_EVENT = "CLIENT_REMOVED_SESSION_SESSIONS";
|
||||
|
||||
public static final String REMOVE_USER_SESSIONS_EVENT = "REMOVE_USER_SESSIONS_EVENT";
|
||||
|
||||
public static final String REMOVE_ALL_LOGIN_FAILURES_EVENT = "REMOVE_ALL_LOGIN_FAILURES_EVENT";
|
||||
|
||||
private Config.Scope config;
|
||||
|
||||
private RemoteCacheInvoker remoteCacheInvoker;
|
||||
private LastSessionRefreshStore lastSessionRefreshStore;
|
||||
private LastSessionRefreshStore offlineLastSessionRefreshStore;
|
||||
|
||||
@Override
|
||||
public InfinispanUserSessionProvider create(KeycloakSession session) {
|
||||
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
|
||||
Cache<String, SessionEntity> cache = connections.getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
Cache<String, SessionEntity> offlineSessionsCache = connections.getCache(InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME);
|
||||
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = connections.getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
Cache<String, SessionEntityWrapper<UserSessionEntity>> offlineSessionsCache = connections.getCache(InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME);
|
||||
Cache<LoginFailureKey, LoginFailureEntity> loginFailures = connections.getCache(InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME);
|
||||
|
||||
return new InfinispanUserSessionProvider(session, cache, offlineSessionsCache, loginFailures);
|
||||
return new InfinispanUserSessionProvider(session, remoteCacheInvoker, lastSessionRefreshStore, offlineLastSessionRefreshStore, cache, offlineSessionsCache, loginFailures);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -62,18 +97,19 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
|
|||
|
||||
@Override
|
||||
public void postInit(final KeycloakSessionFactory factory) {
|
||||
// Max count of worker errors. Initialization will end with an exception when this number is reached
|
||||
final int maxErrors = config.getInt("maxErrors", 20);
|
||||
|
||||
// Count of sessions to be computed in each segment
|
||||
final int sessionsPerSegment = config.getInt("sessionsPerSegment", 100);
|
||||
|
||||
factory.register(new ProviderEventListener() {
|
||||
|
||||
@Override
|
||||
public void onEvent(ProviderEvent event) {
|
||||
if (event instanceof PostMigrationEvent) {
|
||||
loadPersistentSessions(factory, maxErrors, sessionsPerSegment);
|
||||
KeycloakSession session = ((PostMigrationEvent) event).getSession();
|
||||
|
||||
checkRemoteCaches(session);
|
||||
loadPersistentSessions(factory, getMaxErrors(), getSessionsPerSegment());
|
||||
registerClusterListeners(session);
|
||||
loadSessionsFromRemoteCaches(session);
|
||||
|
||||
} else if (event instanceof UserModel.UserRemovedEvent) {
|
||||
UserModel.UserRemovedEvent userRemovedEvent = (UserModel.UserRemovedEvent) event;
|
||||
|
||||
|
@@ -84,35 +120,169 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
|
|||
});
|
||||
}
|
||||
|
||||
// Max count of worker errors. Initialization will end with an exception when this number is reached
|
||||
private int getMaxErrors() {
|
||||
return config.getInt("maxErrors", 20);
|
||||
}
|
||||
|
||||
// Count of sessions to be computed in each segment
|
||||
private int getSessionsPerSegment() {
|
||||
return config.getInt("sessionsPerSegment", 100);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void loadPersistentSessions(final KeycloakSessionFactory sessionFactory, final int maxErrors, final int sessionsPerSegment) {
|
||||
log.debug("Start pre-loading userSessions and clientSessions from persistent storage");
|
||||
log.debug("Start pre-loading userSessions from persistent storage");
|
||||
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
|
||||
|
||||
@Override
|
||||
public void run(KeycloakSession session) {
|
||||
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
|
||||
Cache<String, Serializable> cache = connections.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
|
||||
Cache<String, Serializable> workCache = connections.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
|
||||
|
||||
InfinispanCacheInitializer ispnInitializer = new InfinispanCacheInitializer(sessionFactory, workCache, new OfflinePersistentUserSessionLoader(), "offlineUserSessions", sessionsPerSegment, maxErrors);
|
||||
|
||||
// DB lock to ensure that persistent sessions are loaded from the DB on just one DC. The other DCs will load them from the remote cache.
|
||||
CacheInitializer initializer = new DBLockBasedCacheInitializer(session, ispnInitializer);
|
||||
|
||||
InfinispanUserSessionInitializer initializer = new InfinispanUserSessionInitializer(sessionFactory, cache, new OfflineUserSessionLoader(), maxErrors, sessionsPerSegment, "offlineUserSessions");
|
||||
initializer.initCache();
|
||||
initializer.loadPersistentSessions();
|
||||
initializer.loadSessions();
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
log.debug("Pre-loading userSessions and clientSessions from persistent storage finished");
|
||||
log.debug("Pre-loading userSessions from persistent storage finished");
|
||||
}
|
||||
|
||||
|
||||
protected void registerClusterListeners(KeycloakSession session) {
|
||||
KeycloakSessionFactory sessionFactory = session.getKeycloakSessionFactory();
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
|
||||
cluster.registerListener(REALM_REMOVED_SESSION_EVENT, new AbstractUserSessionClusterListener<RealmRemovedSessionEvent>(sessionFactory) {
|
||||
|
||||
@Override
|
||||
protected void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider, RealmRemovedSessionEvent sessionEvent) {
|
||||
provider.onRealmRemovedEvent(sessionEvent.getRealmId());
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
cluster.registerListener(CLIENT_REMOVED_SESSION_EVENT, new AbstractUserSessionClusterListener<ClientRemovedSessionEvent>(sessionFactory) {
|
||||
|
||||
@Override
|
||||
protected void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider, ClientRemovedSessionEvent sessionEvent) {
|
||||
provider.onClientRemovedEvent(sessionEvent.getRealmId(), sessionEvent.getClientUuid());
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
cluster.registerListener(REMOVE_USER_SESSIONS_EVENT, new AbstractUserSessionClusterListener<RemoveUserSessionsEvent>(sessionFactory) {
|
||||
|
||||
@Override
|
||||
protected void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider, RemoveUserSessionsEvent sessionEvent) {
|
||||
provider.onRemoveUserSessionsEvent(sessionEvent.getRealmId());
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
cluster.registerListener(REMOVE_ALL_LOGIN_FAILURES_EVENT, new AbstractUserSessionClusterListener<RemoveAllUserLoginFailuresEvent>(sessionFactory) {
|
||||
|
||||
@Override
|
||||
protected void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider, RemoveAllUserLoginFailuresEvent sessionEvent) {
|
||||
provider.onRemoveAllUserLoginFailuresEvent(sessionEvent.getRealmId());
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
log.debug("Registered cluster listeners");
|
||||
}
|
||||
|
||||
|
||||
protected void checkRemoteCaches(KeycloakSession session) {
|
||||
this.remoteCacheInvoker = new RemoteCacheInvoker();
|
||||
|
||||
InfinispanConnectionProvider ispn = session.getProvider(InfinispanConnectionProvider.class);
|
||||
|
||||
Cache sessionsCache = ispn.getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
boolean sessionsRemoteCache = checkRemoteCache(session, sessionsCache, (RealmModel realm) -> {
|
||||
return realm.getSsoSessionIdleTimeout() * 1000;
|
||||
});
|
||||
|
||||
if (sessionsRemoteCache) {
|
||||
lastSessionRefreshStore = new LastSessionRefreshStoreFactory().createAndInit(session, sessionsCache, false);
|
||||
}
|
||||
|
||||
|
||||
Cache offlineSessionsCache = ispn.getCache(InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME);
|
||||
boolean offlineSessionsRemoteCache = checkRemoteCache(session, offlineSessionsCache, (RealmModel realm) -> {
|
||||
return realm.getOfflineSessionIdleTimeout() * 1000;
|
||||
});
|
||||
|
||||
if (offlineSessionsRemoteCache) {
|
||||
offlineLastSessionRefreshStore = new LastSessionRefreshStoreFactory().createAndInit(session, offlineSessionsCache, true);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkRemoteCache(KeycloakSession session, Cache ispnCache, RemoteCacheInvoker.MaxIdleTimeLoader maxIdleLoader) {
|
||||
Set<RemoteStore> remoteStores = InfinispanUtil.getRemoteStores(ispnCache);
|
||||
|
||||
if (remoteStores.isEmpty()) {
|
||||
log.debugf("No remote store configured for cache '%s'", ispnCache.getName());
|
||||
return false;
|
||||
} else {
|
||||
log.infof("Remote store configured for cache '%s'", ispnCache.getName());
|
||||
|
||||
RemoteCache remoteCache = remoteStores.iterator().next().getRemoteCache();
|
||||
|
||||
remoteCacheInvoker.addRemoteCache(ispnCache.getName(), remoteCache, maxIdleLoader);
|
||||
|
||||
RemoteCacheSessionListener hotrodListener = RemoteCacheSessionListener.createListener(session, ispnCache, remoteCache);
|
||||
remoteCache.addClientListener(hotrodListener);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void loadSessionsFromRemoteCaches(KeycloakSession session) {
|
||||
for (String cacheName : remoteCacheInvoker.getRemoteCacheNames()) {
|
||||
loadSessionsFromRemoteCache(session.getKeycloakSessionFactory(), cacheName, getMaxErrors());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void loadSessionsFromRemoteCache(final KeycloakSessionFactory sessionFactory, String cacheName, final int maxErrors) {
|
||||
log.debugf("Check pre-loading userSessions from remote cache '%s'", cacheName);
|
||||
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
|
||||
|
||||
@Override
|
||||
public void run(KeycloakSession session) {
|
||||
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
|
||||
Cache<String, Serializable> workCache = connections.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
|
||||
|
||||
// Use a limit for sessionsPerSegment, as RemoteCache bulk load doesn't support pagination :/
|
||||
BaseCacheInitializer initializer = new SingleWorkerCacheInitializer(session, workCache, new RemoteCacheSessionsLoader(cacheName), "remoteCacheLoad::" + cacheName);
|
||||
|
||||
initializer.initCache();
|
||||
initializer.loadSessions();
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
log.debugf("Pre-loading userSessions from remote cache '%s' finished", cacheName);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getId() {
|
||||
return "infinispan";
|
||||
return PROVIDER_ID;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
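For orientation, the same registration pattern could be reused for additional cluster-wide session events. The sketch below is illustrative only: the event name is hypothetical, the handler simply delegates to the provider the way the built-in listeners above do, and cluster/sessionFactory are the local variables visible in registerClusterListeners().

// Hypothetical sketch, not part of this commit. Only ClusterProvider.registerListener and
// AbstractUserSessionClusterListener are taken from the code above.
cluster.registerListener("EXAMPLE_SESSIONS_EVENT", new AbstractUserSessionClusterListener<RemoveUserSessionsEvent>(sessionFactory) {

    @Override
    protected void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider, RemoveUserSessionsEvent sessionEvent) {
        // Delegate to the provider, as the built-in listeners do
        provider.onRemoveUserSessionsEvent(sessionEvent.getRealmId());
    }

});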
@@ -17,15 +17,17 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.keycloak.models.AuthenticatedClientSessionModel;
|
||||
import org.keycloak.models.ClientModel;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.UserModel;
|
||||
import org.keycloak.models.UserSessionModel;
|
||||
import org.keycloak.models.sessions.infinispan.changes.InfinispanChangelogBasedTransaction;
|
||||
import org.keycloak.models.sessions.infinispan.changes.sessions.LastSessionRefreshChecker;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.changes.UserSessionUpdateTask;
|
||||
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
import java.util.Collections;
|
||||
|
@@ -33,7 +35,6 @@ import java.util.HashMap;
|
|||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
|
@@ -44,7 +45,7 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
|
||||
private final InfinispanUserSessionProvider provider;
|
||||
|
||||
private final Cache<String, SessionEntity> cache;
|
||||
private final InfinispanChangelogBasedTransaction updateTx;
|
||||
|
||||
private final RealmModel realm;
|
||||
|
||||
|
@@ -52,11 +53,11 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
|
||||
private final boolean offline;
|
||||
|
||||
public UserSessionAdapter(KeycloakSession session, InfinispanUserSessionProvider provider, Cache<String, SessionEntity> cache, RealmModel realm,
|
||||
public UserSessionAdapter(KeycloakSession session, InfinispanUserSessionProvider provider, InfinispanChangelogBasedTransaction updateTx, RealmModel realm,
|
||||
UserSessionEntity entity, boolean offline) {
|
||||
this.session = session;
|
||||
this.provider = provider;
|
||||
this.cache = cache;
|
||||
this.updateTx = updateTx;
|
||||
this.realm = realm;
|
||||
this.entity = entity;
|
||||
this.offline = offline;
|
||||
|
@@ -74,7 +75,7 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
// Check if client still exists
|
||||
ClientModel client = realm.getClientById(key);
|
||||
if (client != null) {
|
||||
result.put(key, new AuthenticatedClientSessionAdapter(value, client, this, provider, cache));
|
||||
result.put(key, new AuthenticatedClientSessionAdapter(value, client, this, provider, updateTx));
|
||||
} else {
|
||||
removedClientUUIDS.add(key);
|
||||
}
|
||||
|
@@ -83,10 +84,18 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
|
||||
// Update user session
|
||||
if (!removedClientUUIDS.isEmpty()) {
|
||||
for (String clientUUID : removedClientUUIDS) {
|
||||
entity.getAuthenticatedClientSessions().remove(clientUUID);
|
||||
}
|
||||
update();
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity entity) {
|
||||
for (String clientUUID : removedClientUUIDS) {
|
||||
entity.getAuthenticatedClientSessions().remove(clientUUID);
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
update(task);
|
||||
}
|
||||
|
||||
return Collections.unmodifiableMap(result);
|
||||
|
@@ -114,12 +123,6 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
return session.users().getUserById(entity.getUser(), realm);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setUser(UserModel user) {
|
||||
entity.setUser(user.getId());
|
||||
update();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getLoginUsername() {
|
||||
return entity.getLoginUsername();
|
||||
|
@@ -148,8 +151,21 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
}
|
||||
|
||||
public void setLastSessionRefresh(int lastSessionRefresh) {
|
||||
entity.setLastSessionRefresh(lastSessionRefresh);
|
||||
update();
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity entity) {
|
||||
entity.setLastSessionRefresh(lastSessionRefresh);
|
||||
}
|
||||
|
||||
@Override
|
||||
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
|
||||
return new LastSessionRefreshChecker(provider.getLastSessionRefreshStore(), provider.getOfflineLastSessionRefreshStore())
|
||||
.getCrossDCMessageStatus(UserSessionAdapter.this.session, UserSessionAdapter.this.realm, sessionWrapper, offline, lastSessionRefresh);
|
||||
}
|
||||
};
|
||||
|
||||
update(task);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -159,22 +175,36 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
|
||||
@Override
|
||||
public void setNote(String name, String value) {
|
||||
if (value == null) {
|
||||
if (entity.getNotes().containsKey(name)) {
|
||||
removeNote(name);
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity entity) {
|
||||
if (value == null) {
|
||||
if (entity.getNotes().containsKey(name)) {
|
||||
removeNote(name);
|
||||
}
|
||||
return;
|
||||
}
|
||||
entity.getNotes().put(name, value);
|
||||
}
|
||||
return;
|
||||
}
|
||||
entity.getNotes().put(name, value);
|
||||
update();
|
||||
|
||||
};
|
||||
|
||||
update(task);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeNote(String name) {
|
||||
if (entity.getNotes() != null) {
|
||||
entity.getNotes().remove(name);
|
||||
update();
|
||||
}
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity entity) {
|
||||
entity.getNotes().remove(name);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
update(task);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -189,19 +219,34 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
|
||||
@Override
|
||||
public void setState(State state) {
|
||||
entity.setState(state);
|
||||
update();
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity entity) {
|
||||
entity.setState(state);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
update(task);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void restartSession(RealmModel realm, UserModel user, String loginUsername, String ipAddress, String authMethod, boolean rememberMe, String brokerSessionId, String brokerUserId) {
|
||||
provider.updateSessionEntity(entity, realm, user, loginUsername, ipAddress, authMethod, rememberMe, brokerSessionId, brokerUserId);
|
||||
UserSessionUpdateTask task = new UserSessionUpdateTask() {
|
||||
|
||||
entity.setState(null);
|
||||
entity.getNotes().clear();
|
||||
entity.getAuthenticatedClientSessions().clear();
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity entity) {
|
||||
provider.updateSessionEntity(entity, realm, user, loginUsername, ipAddress, authMethod, rememberMe, brokerSessionId, brokerUserId);
|
||||
|
||||
update();
|
||||
entity.setState(null);
|
||||
entity.getNotes().clear();
|
||||
entity.getAuthenticatedClientSessions().clear();
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
update(task);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -222,11 +267,8 @@ public class UserSessionAdapter implements UserSessionModel {
|
|||
return entity;
|
||||
}
|
||||
|
||||
void update() {
|
||||
provider.getTx().replace(cache, entity.getId(), entity);
|
||||
void update(UserSessionUpdateTask task) {
|
||||
updateTx.addTask(getId(), task);
|
||||
}
|
||||
|
||||
Cache<String, SessionEntity> getCache() {
|
||||
return cache;
|
||||
}
|
||||
}
|
||||
|
|
|
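To make the new pattern in UserSessionAdapter concrete, the sketch below shows how a mutation is expressed as a UserSessionUpdateTask and handed to the changelog transaction instead of being written to the cache directly. It is a hedged illustration: the note name is made up, and updateTx/userSessionId stand for the adapter's transaction field and session id (getId()).

// Sketch only: record a change as a task; it is replayed against whichever entity
// version finally wins the optimistic cache replace at commit time.
UserSessionUpdateTask task = new UserSessionUpdateTask() {

    @Override
    public void runUpdate(UserSessionEntity entity) {
        entity.getNotes().put("exampleNote", "exampleValue");   // hypothetical note
    }

};

updateTx.addTask(userSessionId, task);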
@@ -0,0 +1,233 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.AbstractKeycloakTransaction;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class InfinispanChangelogBasedTransaction<S extends SessionEntity> extends AbstractKeycloakTransaction {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(InfinispanChangelogBasedTransaction.class);
|
||||
|
||||
private final KeycloakSession kcSession;
|
||||
private final String cacheName;
|
||||
private final Cache<String, SessionEntityWrapper<S>> cache;
|
||||
private final RemoteCacheInvoker remoteCacheInvoker;
|
||||
|
||||
private final Map<String, SessionUpdatesList<S>> updates = new HashMap<>();
|
||||
|
||||
public InfinispanChangelogBasedTransaction(KeycloakSession kcSession, String cacheName, Cache<String, SessionEntityWrapper<S>> cache, RemoteCacheInvoker remoteCacheInvoker) {
|
||||
this.kcSession = kcSession;
|
||||
this.cacheName = cacheName;
|
||||
this.cache = cache;
|
||||
this.remoteCacheInvoker = remoteCacheInvoker;
|
||||
}
|
||||
|
||||
|
||||
public void addTask(String key, SessionUpdateTask<S> task) {
|
||||
SessionUpdatesList<S> myUpdates = updates.get(key);
|
||||
if (myUpdates == null) {
|
||||
// Lookup entity from cache
|
||||
SessionEntityWrapper<S> wrappedEntity = cache.get(key);
|
||||
if (wrappedEntity == null) {
|
||||
logger.warnf("Not present cache item for key %s", key);
|
||||
return;
|
||||
}
|
||||
|
||||
RealmModel realm = kcSession.realms().getRealm(wrappedEntity.getEntity().getRealm());
|
||||
|
||||
myUpdates = new SessionUpdatesList<>(realm, wrappedEntity);
|
||||
updates.put(key, myUpdates);
|
||||
}
|
||||
|
||||
// Run the update now, so a reader in the same transaction can see it (TODO: rollback may not work correctly; check whether it's an issue)
|
||||
task.runUpdate(myUpdates.getEntityWrapper().getEntity());
|
||||
myUpdates.add(task);
|
||||
}
|
||||
|
||||
|
||||
// Create the entity and a new version for it
|
||||
public void addTask(String key, SessionUpdateTask<S> task, S entity) {
|
||||
if (entity == null) {
|
||||
throw new IllegalArgumentException("Null entity not allowed");
|
||||
}
|
||||
|
||||
RealmModel realm = kcSession.realms().getRealm(entity.getRealm());
|
||||
SessionEntityWrapper<S> wrappedEntity = new SessionEntityWrapper<>(entity);
|
||||
SessionUpdatesList<S> myUpdates = new SessionUpdatesList<>(realm, wrappedEntity);
|
||||
updates.put(key, myUpdates);
|
||||
|
||||
// Run the update now, so a reader in the same transaction can see it
|
||||
task.runUpdate(entity);
|
||||
myUpdates.add(task);
|
||||
}
|
||||
|
||||
|
||||
public void reloadEntityInCurrentTransaction(RealmModel realm, String key, SessionEntityWrapper<S> entity) {
|
||||
if (entity == null) {
|
||||
throw new IllegalArgumentException("Null entity not allowed");
|
||||
}
|
||||
|
||||
SessionEntityWrapper<S> latestEntity = cache.get(key);
|
||||
if (latestEntity == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
SessionUpdatesList<S> newUpdates = new SessionUpdatesList<>(realm, latestEntity);
|
||||
|
||||
SessionUpdatesList<S> existingUpdates = updates.get(key);
|
||||
if (existingUpdates != null) {
|
||||
newUpdates.setUpdateTasks(existingUpdates.getUpdateTasks());
|
||||
}
|
||||
|
||||
updates.put(key, newUpdates);
|
||||
}
|
||||
|
||||
|
||||
public SessionEntityWrapper<S> get(String key) {
|
||||
SessionUpdatesList<S> myUpdates = updates.get(key);
|
||||
if (myUpdates == null) {
|
||||
SessionEntityWrapper<S> wrappedEntity = cache.get(key);
|
||||
if (wrappedEntity == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
RealmModel realm = kcSession.realms().getRealm(wrappedEntity.getEntity().getRealm());
|
||||
|
||||
myUpdates = new SessionUpdatesList<>(realm, wrappedEntity);
|
||||
updates.put(key, myUpdates);
|
||||
|
||||
return wrappedEntity;
|
||||
} else {
|
||||
return myUpdates.getEntityWrapper();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected void commitImpl() {
|
||||
for (Map.Entry<String, SessionUpdatesList<S>> entry : updates.entrySet()) {
|
||||
SessionUpdatesList<S> sessionUpdates = entry.getValue();
|
||||
SessionEntityWrapper<S> sessionWrapper = sessionUpdates.getEntityWrapper();
|
||||
|
||||
RealmModel realm = sessionUpdates.getRealm();
|
||||
|
||||
MergedUpdate<S> merged = MergedUpdate.computeUpdate(sessionUpdates.getUpdateTasks(), sessionWrapper);
|
||||
|
||||
if (merged != null) {
|
||||
// Now run the operation in our cluster
|
||||
runOperationInCluster(entry.getKey(), merged, sessionWrapper);
|
||||
|
||||
// Check if we need to send a message to the second DC
|
||||
remoteCacheInvoker.runTask(kcSession, realm, cacheName, entry.getKey(), merged, sessionWrapper);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void runOperationInCluster(String key, MergedUpdate<S> task, SessionEntityWrapper<S> sessionWrapper) {
|
||||
S session = sessionWrapper.getEntity();
|
||||
SessionUpdateTask.CacheOperation operation = task.getOperation(session);
|
||||
|
||||
// No need to run the update on the underlying entity; local updates were already applied
|
||||
//task.runUpdate(session);
|
||||
|
||||
switch (operation) {
|
||||
case REMOVE:
|
||||
// Just remove it
|
||||
cache
|
||||
.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES)
|
||||
.remove(key);
|
||||
break;
|
||||
case ADD:
|
||||
cache
|
||||
.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES)
|
||||
.put(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS);
|
||||
break;
|
||||
case ADD_IF_ABSENT:
|
||||
SessionEntityWrapper existing = cache.putIfAbsent(key, sessionWrapper);
|
||||
if (existing != null) {
|
||||
throw new IllegalStateException("There is already existing value in cache for key " + key);
|
||||
}
|
||||
break;
|
||||
case REPLACE:
|
||||
replace(key, task, sessionWrapper);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("Unsupported state " + operation);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
private void replace(String key, MergedUpdate<S> task, SessionEntityWrapper<S> oldVersionEntity) {
|
||||
boolean replaced = false;
|
||||
S session = oldVersionEntity.getEntity();
|
||||
|
||||
while (!replaced) {
|
||||
SessionEntityWrapper<S> newVersionEntity = generateNewVersionAndWrapEntity(session, oldVersionEntity.getLocalMetadata());
|
||||
|
||||
// Atomic cluster-aware replace
|
||||
replaced = cache.replace(key, oldVersionEntity, newVersionEntity);
|
||||
|
||||
// Replace failed. Need to load the latest entity from the cache, apply the updates again and retry the replace
|
||||
if (!replaced) {
|
||||
logger.debugf("Replace failed for entity: %s . Will try again", key);
|
||||
|
||||
oldVersionEntity = cache.get(key);
|
||||
|
||||
if (oldVersionEntity == null) {
|
||||
logger.debugf("Entity %s not found. Maybe removed in the meantime. Replace task will be ignored", key);
|
||||
return;
|
||||
}
|
||||
|
||||
session = oldVersionEntity.getEntity();
|
||||
|
||||
task.runUpdate(session);
|
||||
} else {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.tracef("Replace SUCCESS for entity: %s . old version: %d, new version: %d", key, oldVersionEntity.getVersion(), newVersionEntity.getVersion());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected void rollbackImpl() {
|
||||
}
|
||||
|
||||
private SessionEntityWrapper<S> generateNewVersionAndWrapEntity(S entity, Map<String, String> localMetadata) {
|
||||
return new SessionEntityWrapper<>(localMetadata, entity);
|
||||
}
|
||||
|
||||
}
|
|
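The class above collects update tasks per session key and applies them at commit. A rough lifecycle sketch, assuming sessionCache (Cache<String, SessionEntityWrapper<UserSessionEntity>>), remoteCacheInvoker and sessionId exist in the surrounding code and that the transaction is driven through the begin/commit methods inherited from AbstractKeycloakTransaction:

// Hedged sketch of how the provider is expected to drive this transaction.
InfinispanChangelogBasedTransaction<UserSessionEntity> tx =
        new InfinispanChangelogBasedTransaction<>(session, InfinispanConnectionProvider.SESSION_CACHE_NAME, sessionCache, remoteCacheInvoker);

tx.begin();   // begin()/commit() come from AbstractKeycloakTransaction

// Fine-grained tasks for the same key are merged into a single MergedUpdate at commit.
// Note that this addTask() overload expects the entity to already exist in the cache (otherwise it logs a warning and skips).
tx.addTask(sessionId, new UserSessionUpdateTask() {

    @Override
    public void runUpdate(UserSessionEntity entity) {
        entity.setLastSessionRefresh(Time.currentTime());
    }

});

// commitImpl() runs the merged operation against the local cache (an optimistic replace loop
// for REPLACE) and lets RemoteCacheInvoker decide whether the other DC needs to be told.
tx.commit();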
@@ -0,0 +1,99 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
class MergedUpdate<S extends SessionEntity> implements SessionUpdateTask<S> {
|
||||
|
||||
private List<SessionUpdateTask<S>> childUpdates = new LinkedList<>();
|
||||
private CacheOperation operation;
|
||||
private CrossDCMessageStatus crossDCMessageStatus;
|
||||
|
||||
|
||||
public MergedUpdate(CacheOperation operation, CrossDCMessageStatus crossDCMessageStatus) {
|
||||
this.operation = operation;
|
||||
this.crossDCMessageStatus = crossDCMessageStatus;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void runUpdate(S session) {
|
||||
for (SessionUpdateTask<S> child : childUpdates) {
|
||||
child.runUpdate(session);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public CacheOperation getOperation(S session) {
|
||||
return operation;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<S> sessionWrapper) {
|
||||
return crossDCMessageStatus;
|
||||
}
|
||||
|
||||
|
||||
public static <S extends SessionEntity> MergedUpdate<S> computeUpdate(List<SessionUpdateTask<S>> childUpdates, SessionEntityWrapper<S> sessionWrapper) {
|
||||
if (childUpdates == null || childUpdates.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
MergedUpdate<S> result = null;
|
||||
S session = sessionWrapper.getEntity();
|
||||
for (SessionUpdateTask<S> child : childUpdates) {
|
||||
if (result == null) {
|
||||
result = new MergedUpdate<>(child.getOperation(session), child.getCrossDCMessageStatus(sessionWrapper));
|
||||
result.childUpdates.add(child);
|
||||
} else {
|
||||
|
||||
// Merge the operations. REMOVE is a special case, as other operations are not needed then.
|
||||
CacheOperation mergedOp = result.getOperation(session).merge(child.getOperation(session), session);
|
||||
if (mergedOp == CacheOperation.REMOVE) {
|
||||
result = new MergedUpdate<>(child.getOperation(session), child.getCrossDCMessageStatus(sessionWrapper));
|
||||
result.childUpdates.add(child);
|
||||
return result;
|
||||
}
|
||||
|
||||
result.operation = mergedOp;
|
||||
|
||||
// Check if we need to send a message to other DCs and how critical it is
|
||||
CrossDCMessageStatus currentDCStatus = result.getCrossDCMessageStatus(sessionWrapper);
|
||||
|
||||
// Optimization. If we already have SYNC, we don't need to retrieve childDCStatus
|
||||
if (currentDCStatus != CrossDCMessageStatus.SYNC) {
|
||||
CrossDCMessageStatus childDCStatus = child.getCrossDCMessageStatus(sessionWrapper);
|
||||
result.crossDCMessageStatus = currentDCStatus.merge(childDCStatus);
|
||||
}
|
||||
|
||||
// Finally add another update to the result
|
||||
result.childUpdates.add(child);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
}
|
|
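To see what MergedUpdate.computeUpdate does with several tasks against one session, here is a worked, hedged example. It assumes it lives in the same org.keycloak.models.sessions.infinispan.changes package (MergedUpdate is package-private) and that UserSessionEntity has a no-arg constructor.

// Two refreshes of the same user session: the first is judged not worth propagating
// cross-DC, the second uses the default SYNC status from UserSessionUpdateTask.
UserSessionEntity entity = new UserSessionEntity();                       // assumed no-arg constructor
SessionEntityWrapper<UserSessionEntity> wrapper = new SessionEntityWrapper<>(entity);

List<SessionUpdateTask<UserSessionEntity>> tasks = new LinkedList<>();

tasks.add(new UserSessionUpdateTask() {

    @Override
    public void runUpdate(UserSessionEntity e) {
        e.setLastSessionRefresh(Time.currentTime());
    }

    @Override
    public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
        return CrossDCMessageStatus.NOT_NEEDED;                           // pretend this refresh is not worth a remote write
    }

});

tasks.add(new UserSessionUpdateTask() {

    @Override
    public void runUpdate(UserSessionEntity e) {
        e.setLastSessionRefresh(Time.currentTime() + 60);
    }
    // inherited getCrossDCMessageStatus() returns SYNC

});

MergedUpdate<UserSessionEntity> merged = MergedUpdate.computeUpdate(tasks, wrapper);
// merged.getOperation(entity)             -> REPLACE (both tasks are REPLACE)
// merged.getCrossDCMessageStatus(wrapper) -> SYNC    (SYNC wins over NOT_NEEDED)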
@@ -0,0 +1,151 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.infinispan.commons.marshall.Externalizer;
|
||||
import org.infinispan.commons.marshall.MarshallUtil;
|
||||
import org.infinispan.commons.marshall.SerializeWith;
|
||||
import org.keycloak.models.sessions.infinispan.changes.sessions.SessionData;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@SerializeWith(SessionEntityWrapper.ExternalizerImpl.class)
|
||||
public class SessionEntityWrapper<S extends SessionEntity> {
|
||||
|
||||
private UUID version;
|
||||
private final S entity;
|
||||
private final Map<String, String> localMetadata;
|
||||
|
||||
|
||||
protected SessionEntityWrapper(UUID version, Map<String, String> localMetadata, S entity) {
|
||||
if (version == null) {
|
||||
throw new IllegalArgumentException("Version UUID can't be null");
|
||||
}
|
||||
|
||||
this.version = version;
|
||||
this.localMetadata = localMetadata;
|
||||
this.entity = entity;
|
||||
}
|
||||
|
||||
public SessionEntityWrapper(Map<String, String> localMetadata, S entity) {
|
||||
this(UUID.randomUUID(), localMetadata, entity);
|
||||
}
|
||||
|
||||
public SessionEntityWrapper(S entity) {
|
||||
this(new ConcurrentHashMap<>(), entity);
|
||||
}
|
||||
|
||||
|
||||
public UUID getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(UUID version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
|
||||
public S getEntity() {
|
||||
return entity;
|
||||
}
|
||||
|
||||
public String getLocalMetadataNote(String key) {
|
||||
return localMetadata.get(key);
|
||||
}
|
||||
|
||||
public void putLocalMetadataNote(String key, String value) {
|
||||
localMetadata.put(key, value);
|
||||
}
|
||||
|
||||
public Integer getLocalMetadataNoteInt(String key) {
|
||||
String note = getLocalMetadataNote(key);
|
||||
return note == null ? null : Integer.parseInt(note);
|
||||
}
|
||||
|
||||
public void putLocalMetadataNoteInt(String key, int value) {
|
||||
localMetadata.put(key, String.valueOf(value));
|
||||
}
|
||||
|
||||
public Map<String, String> getLocalMetadata() {
|
||||
return localMetadata;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (!(o instanceof SessionEntityWrapper)) return false;
|
||||
|
||||
SessionEntityWrapper that = (SessionEntityWrapper) o;
|
||||
|
||||
if (!Objects.equals(version, that.version)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return Objects.equals(entity, that.entity);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hashCode(version) * 17
|
||||
+ Objects.hashCode(entity);
|
||||
}
|
||||
|
||||
|
||||
public static class ExternalizerImpl implements Externalizer<SessionEntityWrapper> {
|
||||
|
||||
|
||||
@Override
|
||||
public void writeObject(ObjectOutput output, SessionEntityWrapper obj) throws IOException {
|
||||
MarshallUtil.marshallUUID(obj.version, output, false);
|
||||
MarshallUtil.marshallMap(obj.localMetadata, output);
|
||||
output.writeObject(obj.getEntity());
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public SessionEntityWrapper readObject(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
UUID objVersion = MarshallUtil.unmarshallUUID(input, false);
|
||||
|
||||
Map<String, String> localMetadata = MarshallUtil.unmarshallMap(input, new MarshallUtil.MapBuilder<String, String, Map<String, String>>() {
|
||||
|
||||
@Override
|
||||
public Map<String, String> build(int size) {
|
||||
return new ConcurrentHashMap<>(size);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
SessionEntity entity = (SessionEntity) input.readObject();
|
||||
|
||||
return new SessionEntityWrapper<>(objVersion, localMetadata, entity);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
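A short, hedged sketch of how the wrapper's version and local metadata are meant to be used together. The sessionCache and sessionId variables are assumed, as is the no-arg UserSessionEntity constructor.

// Every write produces a wrapper with a fresh random UUID version; that is what turns
// cache.replace(key, oldWrapper, newWrapper) into an optimistic, cluster-aware compare-and-set.
UserSessionEntity entity = new UserSessionEntity();
SessionEntityWrapper<UserSessionEntity> oldWrapper = new SessionEntityWrapper<>(entity);

// Local metadata is node-local bookkeeping that travels with the wrapper, e.g. the "lsrr"
// note (LastSessionRefreshChecker.LAST_SESSION_REFRESH_REMOTE) remembering what the remote cache last saw.
oldWrapper.putLocalMetadataNoteInt("lsrr", Time.currentTime());

SessionEntityWrapper<UserSessionEntity> newWrapper =
        new SessionEntityWrapper<>(oldWrapper.getLocalMetadata(), entity); // same entity and metadata, new version

boolean replaced = sessionCache.replace(sessionId, oldWrapper, newWrapper);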
@@ -0,0 +1,87 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public interface SessionUpdateTask<S extends SessionEntity> {
|
||||
|
||||
void runUpdate(S entity);
|
||||
|
||||
CacheOperation getOperation(S entity);
|
||||
|
||||
CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<S> sessionWrapper);
|
||||
|
||||
default long getLifespanMs() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
enum CacheOperation {
|
||||
|
||||
ADD,
|
||||
ADD_IF_ABSENT, // ADD_IF_ABSENT throws an exception if there is an existing value
|
||||
REMOVE,
|
||||
REPLACE;
|
||||
|
||||
CacheOperation merge(CacheOperation other, SessionEntity entity) {
|
||||
if (this == REMOVE || other == REMOVE) {
|
||||
return REMOVE;
|
||||
}
|
||||
|
||||
if (this == ADD || this == ADD_IF_ABSENT) {
|
||||
if (other == ADD || other == ADD_IF_ABSENT) {
|
||||
throw new IllegalStateException("Illegal state. Task already in progress for session " + entity.getId());
|
||||
}
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
// Lowest priority
|
||||
return REPLACE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
enum CrossDCMessageStatus {
|
||||
SYNC,
|
||||
//ASYNC,
|
||||
// QUEUE,
|
||||
NOT_NEEDED;
|
||||
|
||||
|
||||
CrossDCMessageStatus merge(CrossDCMessageStatus other) {
|
||||
if (this == SYNC || other == SYNC) {
|
||||
return SYNC;
|
||||
}
|
||||
|
||||
/*if (this == ASYNC || other == ASYNC) {
|
||||
return ASYNC;
|
||||
}*/
|
||||
|
||||
return NOT_NEEDED;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
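The merge rules above are easiest to read through a few concrete cases. The sketch below assumes it runs inside the same package (both merge() methods are package-private) and only exercises the enum logic.

SessionEntity anySession = new UserSessionEntity();   // only used for the error message in the ADD + ADD case

// REMOVE always wins:
SessionUpdateTask.CacheOperation op1 =
        SessionUpdateTask.CacheOperation.REPLACE.merge(SessionUpdateTask.CacheOperation.REMOVE, anySession);   // -> REMOVE

// ADD absorbs later REPLACEs of the same not-yet-committed entity:
SessionUpdateTask.CacheOperation op2 =
        SessionUpdateTask.CacheOperation.ADD.merge(SessionUpdateTask.CacheOperation.REPLACE, anySession);      // -> ADD

// Everything else degrades to REPLACE:
SessionUpdateTask.CacheOperation op3 =
        SessionUpdateTask.CacheOperation.REPLACE.merge(SessionUpdateTask.CacheOperation.REPLACE, anySession);  // -> REPLACE

// For the cross-DC flag, SYNC dominates:
SessionUpdateTask.CrossDCMessageStatus status =
        SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED.merge(SessionUpdateTask.CrossDCMessageStatus.SYNC);  // -> SYNC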
@@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
/**
|
||||
* Tracks all changes to the underlying session in this transaction.
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
class SessionUpdatesList<S extends SessionEntity> {
|
||||
|
||||
private final RealmModel realm;
|
||||
|
||||
private final SessionEntityWrapper<S> entityWrapper;
|
||||
|
||||
private List<SessionUpdateTask<S>> updateTasks = new LinkedList<>();
|
||||
|
||||
public SessionUpdatesList(RealmModel realm, SessionEntityWrapper<S> entityWrapper) {
|
||||
this.realm = realm;
|
||||
this.entityWrapper = entityWrapper;
|
||||
}
|
||||
|
||||
public RealmModel getRealm() {
|
||||
return realm;
|
||||
}
|
||||
|
||||
public SessionEntityWrapper<S> getEntityWrapper() {
|
||||
return entityWrapper;
|
||||
}
|
||||
|
||||
|
||||
public void add(SessionUpdateTask<S> task) {
|
||||
updateTasks.add(task);
|
||||
}
|
||||
|
||||
public List<SessionUpdateTask<S>> getUpdateTasks() {
|
||||
return updateTasks;
|
||||
}
|
||||
|
||||
public void setUpdateTasks(List<SessionUpdateTask<S>> updateTasks) {
|
||||
this.updateTasks = updateTasks;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
/**
|
||||
* Task to create or update an AuthenticatedClientSessionEntity within a userSession.
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class UserSessionClientSessionUpdateTask extends UserSessionUpdateTask {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(UserSessionClientSessionUpdateTask.class);
|
||||
|
||||
private final String clientUUID;
|
||||
|
||||
public UserSessionClientSessionUpdateTask(String clientUUID) {
|
||||
this.clientUUID = clientUUID;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void runUpdate(UserSessionEntity userSession) {
|
||||
AuthenticatedClientSessionEntity clientSession = userSession.getAuthenticatedClientSessions().get(clientUUID);
|
||||
if (clientSession == null) {
|
||||
logger.warnf("Not found authenticated client session entity for client %s in userSession %s", clientUUID, userSession.getId());
|
||||
return;
|
||||
}
|
||||
|
||||
runClientSessionUpdate(clientSession);
|
||||
}
|
||||
|
||||
protected abstract void runClientSessionUpdate(AuthenticatedClientSessionEntity entity);
|
||||
}
|
|
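A hedged sketch of a concrete subclass: the clientUUID variable and the transaction it is registered with are assumptions, only the base class and AuthenticatedClientSessionEntity.setTimestamp come from this change.

// Bump the timestamp of one client session inside the user session.
UserSessionClientSessionUpdateTask touchTask = new UserSessionClientSessionUpdateTask(clientUUID) {

    @Override
    protected void runClientSessionUpdate(AuthenticatedClientSessionEntity entity) {
        entity.setTimestamp(Time.currentTime());
    }

};

userSessionUpdateTx.addTask(userSessionId, touchTask);   // assumed changelog transaction for the sessions cache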
@@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes;
|
||||
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class UserSessionUpdateTask implements SessionUpdateTask<UserSessionEntity> {
|
||||
|
||||
@Override
|
||||
public CacheOperation getOperation(UserSessionEntity session) {
|
||||
return CacheOperation.REPLACE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
|
||||
return CrossDCMessageStatus.SYNC;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,85 @@
|
|||
/*
|
||||
* Copyright 2017 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes.sessions;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class LastSessionRefreshChecker {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(LastSessionRefreshChecker.class);
|
||||
|
||||
private final LastSessionRefreshStore store;
|
||||
private final LastSessionRefreshStore offlineStore;
|
||||
|
||||
|
||||
public LastSessionRefreshChecker(LastSessionRefreshStore store, LastSessionRefreshStore offlineStore) {
|
||||
this.store = store;
|
||||
this.offlineStore = offlineStore;
|
||||
}
|
||||
|
||||
|
||||
// Metadata attribute containing the lastSessionRefresh known to the remoteCache. Used to decide whether we need to write to the remoteCache (other DC) or not
|
||||
public static final String LAST_SESSION_REFRESH_REMOTE = "lsrr";
|
||||
|
||||
|
||||
public SessionUpdateTask.CrossDCMessageStatus getCrossDCMessageStatus(KeycloakSession kcSession, RealmModel realm, SessionEntityWrapper<UserSessionEntity> sessionWrapper, boolean offline, int newLastSessionRefresh) {
|
||||
// revokeRefreshToken always writes everything to remoteCache immediately
|
||||
if (realm.isRevokeRefreshToken()) {
|
||||
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
|
||||
}
|
||||
|
||||
// We're likely not in a cross-dc environment, so it doesn't matter what we return
|
||||
LastSessionRefreshStore storeToUse = offline ? offlineStore : store;
|
||||
if (storeToUse == null) {
|
||||
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
|
||||
}
|
||||
|
||||
Boolean ignoreRemoteCacheUpdate = (Boolean) kcSession.getAttribute(LastSessionRefreshListener.IGNORE_REMOTE_CACHE_UPDATE);
|
||||
if (ignoreRemoteCacheUpdate != null && ignoreRemoteCacheUpdate) {
|
||||
return SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED;
|
||||
}
|
||||
|
||||
Integer lsrr = sessionWrapper.getLocalMetadataNoteInt(LAST_SESSION_REFRESH_REMOTE);
|
||||
if (lsrr == null) {
|
||||
logger.warnf("Not available lsrr note on user session %s.", sessionWrapper.getEntity().getId());
|
||||
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
|
||||
}
|
||||
|
||||
int idleTimeout = offline ? realm.getOfflineSessionIdleTimeout() : realm.getSsoSessionIdleTimeout();
|
||||
|
||||
if (lsrr + (idleTimeout / 2) <= newLastSessionRefresh) {
|
||||
logger.debugf("We are going to write remotely. Remote last session refresh: %d, New last session refresh: %d", (int) lsrr, newLastSessionRefresh);
|
||||
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
|
||||
}
|
||||
|
||||
logger.debugf("Skip writing last session refresh to the remoteCache. Session %s newLastSessionRefresh %d", sessionWrapper.getEntity().getId(), newLastSessionRefresh);
|
||||
|
||||
storeToUse.putLastSessionRefresh(kcSession, sessionWrapper.getEntity().getId(), realm.getId(), newLastSessionRefresh);
|
||||
|
||||
return SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED;
|
||||
}
|
||||
|
||||
}
|
|
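The threshold above is easiest to see with concrete numbers; the worked example below is illustrative only.

// With a 30-minute SSO idle timeout, the remote cache is written again only once the
// session has progressed at least idleTimeout/2 = 15 minutes past the value the remote cache already knows.
int idleTimeout = 1800;                         // realm.getSsoSessionIdleTimeout(), in seconds
int lsrr = 1_000_000;                           // lastSessionRefresh already propagated to the remote cache
int newLastSessionRefresh1 = 1_000_000 + 600;   // 10 minutes later
int newLastSessionRefresh2 = 1_000_000 + 900;   // 15 minutes later

boolean writeRemotely1 = lsrr + (idleTimeout / 2) <= newLastSessionRefresh1;  // false -> NOT_NEEDED, queued in the store
boolean writeRemotely2 = lsrr + (idleTimeout / 2) <= newLastSessionRefresh2;  // true  -> SYNC, written to the remote cache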
@@ -0,0 +1,73 @@
|
|||
/*
|
||||
* Copyright 2017 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes.sessions;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.infinispan.commons.marshall.Externalizer;
|
||||
import org.infinispan.commons.marshall.MarshallUtil;
|
||||
import org.infinispan.commons.marshall.SerializeWith;
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@SerializeWith(LastSessionRefreshEvent.ExternalizerImpl.class)
|
||||
public class LastSessionRefreshEvent implements ClusterEvent {
|
||||
|
||||
private final Map<String, SessionData> lastSessionRefreshes;
|
||||
|
||||
public LastSessionRefreshEvent(Map<String, SessionData> lastSessionRefreshes) {
|
||||
this.lastSessionRefreshes = lastSessionRefreshes;
|
||||
}
|
||||
|
||||
public Map<String, SessionData> getLastSessionRefreshes() {
|
||||
return lastSessionRefreshes;
|
||||
}
|
||||
|
||||
|
||||
public static class ExternalizerImpl implements Externalizer<LastSessionRefreshEvent> {
|
||||
|
||||
|
||||
@Override
|
||||
public void writeObject(ObjectOutput output, LastSessionRefreshEvent obj) throws IOException {
|
||||
MarshallUtil.marshallMap(obj.lastSessionRefreshes, output);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public LastSessionRefreshEvent readObject(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
Map<String, SessionData> map = MarshallUtil.unmarshallMap(input, new MarshallUtil.MapBuilder<String, SessionData, Map<String, SessionData>>() {
|
||||
|
||||
@Override
|
||||
public Map<String, SessionData> build(int size) {
|
||||
return new HashMap<>(size);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
LastSessionRefreshEvent event = new LastSessionRefreshEvent(map);
|
||||
return event;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,112 @@
|
|||
/*
|
||||
* Copyright 2017 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes.sessions;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.client.hotrod.event.ClientEvent;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
import org.keycloak.cluster.ClusterListener;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.UserSessionModel;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class LastSessionRefreshListener implements ClusterListener {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(LastSessionRefreshListener.class);
|
||||
|
||||
public static final String IGNORE_REMOTE_CACHE_UPDATE = "IGNORE_REMOTE_CACHE_UPDATE";
|
||||
|
||||
private final boolean offline;
|
||||
|
||||
private final KeycloakSessionFactory sessionFactory;
|
||||
private final Cache<String, SessionEntityWrapper> cache;
|
||||
private final boolean distributed;
|
||||
private final String myAddress;
|
||||
|
||||
public LastSessionRefreshListener(KeycloakSession session, Cache<String, SessionEntityWrapper> cache, boolean offline) {
|
||||
this.sessionFactory = session.getKeycloakSessionFactory();
|
||||
this.cache = cache;
|
||||
this.offline = offline;
|
||||
|
||||
this.distributed = InfinispanUtil.isDistributedCache(cache);
|
||||
if (this.distributed) {
|
||||
this.myAddress = InfinispanUtil.getMyAddress(session);
|
||||
} else {
|
||||
this.myAddress = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void eventReceived(ClusterEvent event) {
|
||||
Map<String, SessionData> lastSessionRefreshes = ((LastSessionRefreshEvent) event).getLastSessionRefreshes();
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debugf("Received refreshes. Offline %b, refreshes: %s", offline, lastSessionRefreshes);
|
||||
}
|
||||
|
||||
lastSessionRefreshes.entrySet().stream().forEach((entry) -> {
|
||||
String sessionId = entry.getKey();
|
||||
String realmId = entry.getValue().getRealmId();
|
||||
int lastSessionRefresh = entry.getValue().getLastSessionRefresh();
|
||||
|
||||
// All nodes will receive the message, so ensure that each node updates just the lastSessionRefreshes it owns.
|
||||
if (shouldUpdateLocalCache(sessionId)) {
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, (kcSession) -> {
|
||||
|
||||
RealmModel realm = kcSession.realms().getRealm(realmId);
|
||||
UserSessionModel userSession = kcSession.sessions().getUserSession(realm, sessionId);
|
||||
if (userSession == null) {
|
||||
logger.debugf("User session %s not available on node %s", sessionId, myAddress);
|
||||
} else {
|
||||
// Update only if the lastSessionRefresh from the event is bigger than ours
|
||||
if (lastSessionRefresh > userSession.getLastSessionRefresh()) {
|
||||
|
||||
// Ensure that remoteCache won't be updated due to this
|
||||
kcSession.setAttribute(IGNORE_REMOTE_CACHE_UPDATE, true);
|
||||
|
||||
userSession.setLastSessionRefresh(lastSessionRefresh);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
// For distributed caches, ensure that the local modification is executed only on the key owner
|
||||
protected boolean shouldUpdateLocalCache(String key) {
|
||||
if (!distributed) {
|
||||
return true;
|
||||
} else {
|
||||
String keyAddress = InfinispanUtil.getKeyPrimaryOwnerAddress(cache, key);
|
||||
return myAddress.equals(keyAddress);
|
||||
}
|
||||
}
|
||||
}
|
|
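The listener above follows two rules: every node receives the cluster event, but only the primary owner of a session key writes it back, and a refresh is applied only when the incoming timestamp is newer than the local one. A minimal standalone sketch of that update rule, assuming the names applyRefresh and isLocalOwner are illustrative and not part of Keycloak:

import java.util.Map;
import java.util.function.Predicate;

class LastSessionRefreshRuleSketch {

    // Apply one refresh the way LastSessionRefreshListener does: skip keys this node
    // does not own, and never move the stored timestamp backwards.
    static void applyRefresh(Map<String, Integer> localLastRefresh, String sessionId,
                             int remoteLastRefresh, Predicate<String> isLocalOwner) {
        if (!isLocalOwner.test(sessionId)) {
            return; // all nodes receive the event; only the key owner persists it
        }
        localLastRefresh.merge(sessionId, remoteLastRefresh, Math::max);
    }
}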
@@ -0,0 +1,101 @@
|
|||
/*
|
||||
* Copyright 2017 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes.sessions;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.cluster.ClusterProvider;
|
||||
import org.keycloak.common.util.Time;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
|
||||
/**
|
||||
* Tracks the queue of lastSessionRefreshes, which were updated on this host. Those will be sent to the second DC in bulk, so the second DC can update
|
||||
* lastSessionRefreshes on its side. The message is sent either periodically or once many lastSessionRefreshes are stored.
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class LastSessionRefreshStore {
|
||||
|
||||
protected static final Logger logger = Logger.getLogger(LastSessionRefreshStore.class);
|
||||
|
||||
private final int maxIntervalBetweenMessagesSeconds;
|
||||
private final int maxCount;
|
||||
private final String eventKey;
|
||||
|
||||
private volatile Map<String, SessionData> lastSessionRefreshes = new ConcurrentHashMap<>();
|
||||
|
||||
private volatile int lastRun = Time.currentTime();
|
||||
|
||||
|
||||
protected LastSessionRefreshStore(int maxIntervalBetweenMessagesSeconds, int maxCount, String eventKey) {
|
||||
this.maxIntervalBetweenMessagesSeconds = maxIntervalBetweenMessagesSeconds;
|
||||
this.maxCount = maxCount;
|
||||
this.eventKey = eventKey;
|
||||
}
|
||||
|
||||
|
||||
public void putLastSessionRefresh(KeycloakSession kcSession, String sessionId, String realmId, int lastSessionRefresh) {
|
||||
lastSessionRefreshes.put(sessionId, new SessionData(realmId, lastSessionRefresh));
|
||||
|
||||
// Assume that lastSessionRefresh is the same as, or close to, the current time
|
||||
checkSendingMessage(kcSession, lastSessionRefresh);
|
||||
}
|
||||
|
||||
|
||||
void checkSendingMessage(KeycloakSession kcSession, int currentTime) {
|
||||
if (lastSessionRefreshes.size() >= maxCount || lastRun + maxIntervalBetweenMessagesSeconds <= currentTime) {
|
||||
Map<String, SessionData> refreshesToSend = prepareSendingMessage(currentTime);
|
||||
|
||||
// Sending message doesn't need to be synchronized
|
||||
if (refreshesToSend != null) {
|
||||
sendMessage(kcSession, refreshesToSend);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Synchronized manipulation of the internal state. Returns the map if the message should be sent, otherwise returns null
|
||||
private synchronized Map<String, SessionData> prepareSendingMessage(int currentTime) {
|
||||
if (lastSessionRefreshes.size() >= maxCount || lastRun + maxIntervalBetweenMessagesSeconds <= currentTime) {
|
||||
// Create new map instance, so that new writers will use that one
|
||||
Map<String, SessionData> copiedRefreshesToSend = lastSessionRefreshes;
|
||||
lastSessionRefreshes = new ConcurrentHashMap<>();
|
||||
lastRun = currentTime;
|
||||
|
||||
return copiedRefreshesToSend;
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
protected void sendMessage(KeycloakSession kcSession, Map<String, SessionData> refreshesToSend) {
|
||||
LastSessionRefreshEvent event = new LastSessionRefreshEvent(refreshesToSend);
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debugf("Sending lastSessionRefreshes: %s", event.getLastSessionRefreshes().toString());
|
||||
}
|
||||
|
||||
// Don't notify local DC about the lastSessionRefreshes. They were processed here already
|
||||
ClusterProvider cluster = kcSession.getProvider(ClusterProvider.class);
|
||||
cluster.notify(eventKey, event, true, ClusterProvider.DCNotify.ALL_BUT_LOCAL_DC);
|
||||
}
|
||||
|
||||
}
|
|
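A hedged sketch of how a caller might feed the store on every refresh; the surrounding method and where the store instance comes from are assumptions, only putLastSessionRefresh and its signature come from the class above:

import org.keycloak.common.util.Time;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.UserSessionModel;

class LastSessionRefreshUsageSketch {

    // Assumed call site: a user-session provider updating the timestamp on refresh.
    void onSessionRefreshed(KeycloakSession session, UserSessionModel userSession,
                            LastSessionRefreshStore lastSessionRefreshStore) {
        int now = Time.currentTime();
        userSession.setLastSessionRefresh(now);
        // Queue the refresh; the store decides when to send the bulk message to the other DC
        lastSessionRefreshStore.putLastSessionRefresh(session, userSession.getId(),
                userSession.getRealm().getId(), now);
    }
}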
@@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright 2017 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes.sessions;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.keycloak.cluster.ClusterProvider;
|
||||
import org.keycloak.common.util.Time;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
import org.keycloak.timer.TimerProvider;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class LastSessionRefreshStoreFactory {
|
||||
|
||||
// Timer interval. The store is checked every 5 seconds to determine whether the message with the stored lastSessionRefreshes should be sent
|
||||
public static final long DEFAULT_TIMER_INTERVAL_MS = 5000;
|
||||
|
||||
// Max interval between messages. When a message is sent to the second DC, the next one will be sent at latest after 60 seconds (even if maxCount is not reached).
|
||||
public static final int DEFAULT_MAX_INTERVAL_BETWEEN_MESSAGES_SECONDS = 60;
|
||||
|
||||
// Max count of lastSessionRefreshes. Once the count of stored lastSessionRefreshes reaches this value, the message is sent to the second DC
|
||||
public static final int DEFAULT_MAX_COUNT = 100;
|
||||
|
||||
|
||||
public LastSessionRefreshStore createAndInit(KeycloakSession kcSession, Cache<String, SessionEntityWrapper> cache, boolean offline) {
|
||||
return createAndInit(kcSession, cache, DEFAULT_TIMER_INTERVAL_MS, DEFAULT_MAX_INTERVAL_BETWEEN_MESSAGES_SECONDS, DEFAULT_MAX_COUNT, offline);
|
||||
}
|
||||
|
||||
|
||||
public LastSessionRefreshStore createAndInit(KeycloakSession kcSession, Cache<String, SessionEntityWrapper> cache, long timerIntervalMs, int maxIntervalBetweenMessagesSeconds, int maxCount, boolean offline) {
|
||||
String eventKey = offline ? "lastSessionRefreshes-offline" : "lastSessionRefreshes";
|
||||
LastSessionRefreshStore store = createStoreInstance(maxIntervalBetweenMessagesSeconds, maxCount, eventKey);
|
||||
|
||||
// Register listener
|
||||
ClusterProvider cluster = kcSession.getProvider(ClusterProvider.class);
|
||||
cluster.registerListener(eventKey, new LastSessionRefreshListener(kcSession, cache, offline));
|
||||
|
||||
// Setup periodic timer check
|
||||
TimerProvider timer = kcSession.getProvider(TimerProvider.class);
|
||||
timer.scheduleTask((KeycloakSession keycloakSession) -> {
|
||||
|
||||
store.checkSendingMessage(keycloakSession, Time.currentTime());
|
||||
|
||||
}, timerIntervalMs, eventKey);
|
||||
|
||||
return store;
|
||||
}
|
||||
|
||||
|
||||
protected LastSessionRefreshStore createStoreInstance(int maxIntervalBetweenMessagesSeconds, int maxCount, String eventKey) {
|
||||
return new LastSessionRefreshStore(maxIntervalBetweenMessagesSeconds, maxCount, eventKey);
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
|
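A hedged sketch of how the factory might be wired during provider startup; the wrapper class and where it is called from are assumptions, only createAndInit and its defaults come from the class above:

import org.infinispan.Cache;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;

class LastSessionRefreshWiringSketch {

    // Registers the cluster listener and schedules the periodic check
    // (defaults: 5s timer, 60s max interval, 100 max count).
    LastSessionRefreshStore init(KeycloakSession kcSession, Cache<String, SessionEntityWrapper> sessionsCache) {
        return new LastSessionRefreshStoreFactory().createAndInit(kcSession, sessionsCache, false);
    }
}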
@@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright 2017 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.changes.sessions;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
|
||||
import org.infinispan.commons.marshall.Externalizer;
|
||||
import org.infinispan.commons.marshall.MarshallUtil;
|
||||
import org.infinispan.commons.marshall.SerializeWith;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@SerializeWith(SessionData.ExternalizerImpl.class)
|
||||
public class SessionData {
|
||||
|
||||
private final String realmId;
|
||||
private final int lastSessionRefresh;
|
||||
|
||||
public SessionData(String realmId, int lastSessionRefresh) {
|
||||
this.realmId = realmId;
|
||||
this.lastSessionRefresh = lastSessionRefresh;
|
||||
}
|
||||
|
||||
public String getRealmId() {
|
||||
return realmId;
|
||||
}
|
||||
|
||||
public int getLastSessionRefresh() {
|
||||
return lastSessionRefresh;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("realmId: %s, lastSessionRefresh: %d", realmId, lastSessionRefresh);
|
||||
}
|
||||
|
||||
public static class ExternalizerImpl implements Externalizer<SessionData> {
|
||||
|
||||
|
||||
@Override
|
||||
public void writeObject(ObjectOutput output, SessionData obj) throws IOException {
|
||||
MarshallUtil.marshallString(obj.realmId, output);
|
||||
MarshallUtil.marshallInt(output, obj.lastSessionRefresh);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public SessionData readObject(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
String realmId = MarshallUtil.unmarshallString(input);
|
||||
int lastSessionRefresh = MarshallUtil.unmarshallInt(input);
|
||||
|
||||
return new SessionData(realmId, lastSessionRefresh);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
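The Externalizer writes the two fields in a fixed order and reads them back symmetrically. A quick round-trip sketch; Infinispan normally drives the Externalizer through its own marshaller via @SerializeWith, so the plain JDK object streams here are only for illustration:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

class SessionDataRoundTripSketch {

    public static void main(String[] args) throws Exception {
        SessionData data = new SessionData("test-realm", 1500000000);

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
            new SessionData.ExternalizerImpl().writeObject(out, data);
        }

        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            SessionData copy = new SessionData.ExternalizerImpl().readObject(in);
            System.out.println(copy); // realmId: test-realm, lastSessionRefresh: 1500000000
        }
    }
}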
@@ -17,13 +17,24 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan.entities;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
import java.io.Serializable;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.infinispan.commons.marshall.Externalizer;
|
||||
import org.infinispan.commons.marshall.MarshallUtil;
|
||||
import org.infinispan.commons.marshall.SerializeWith;
|
||||
import org.keycloak.models.sessions.infinispan.util.KeycloakMarshallUtil;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@SerializeWith(AuthenticatedClientSessionEntity.ExternalizerImpl.class)
|
||||
public class AuthenticatedClientSessionEntity implements Serializable {
|
||||
|
||||
private String authMethod;
|
||||
|
@@ -33,7 +44,7 @@ public class AuthenticatedClientSessionEntity implements Serializable {
|
|||
|
||||
private Set<String> roles;
|
||||
private Set<String> protocolMappers;
|
||||
private Map<String, String> notes;
|
||||
private Map<String, String> notes = new ConcurrentHashMap<>();
|
||||
|
||||
public String getAuthMethod() {
|
||||
return authMethod;
|
||||
|
@@ -91,4 +102,46 @@ public class AuthenticatedClientSessionEntity implements Serializable {
|
|||
this.notes = notes;
|
||||
}
|
||||
|
||||
|
||||
public static class ExternalizerImpl implements Externalizer<AuthenticatedClientSessionEntity> {
|
||||
|
||||
@Override
|
||||
public void writeObject(ObjectOutput output, AuthenticatedClientSessionEntity session) throws IOException {
|
||||
MarshallUtil.marshallString(session.getAuthMethod(), output);
|
||||
MarshallUtil.marshallString(session.getRedirectUri(), output);
|
||||
MarshallUtil.marshallInt(output, session.getTimestamp());
|
||||
MarshallUtil.marshallString(session.getAction(), output);
|
||||
|
||||
Map<String, String> notes = session.getNotes();
|
||||
KeycloakMarshallUtil.writeMap(notes, KeycloakMarshallUtil.STRING_EXT, KeycloakMarshallUtil.STRING_EXT, output);
|
||||
|
||||
KeycloakMarshallUtil.writeCollection(session.getProtocolMappers(), KeycloakMarshallUtil.STRING_EXT, output);
|
||||
KeycloakMarshallUtil.writeCollection(session.getRoles(), KeycloakMarshallUtil.STRING_EXT, output);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public AuthenticatedClientSessionEntity readObject(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
AuthenticatedClientSessionEntity sessionEntity = new AuthenticatedClientSessionEntity();
|
||||
|
||||
sessionEntity.setAuthMethod(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setRedirectUri(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setTimestamp(MarshallUtil.unmarshallInt(input));
|
||||
sessionEntity.setAction(MarshallUtil.unmarshallString(input));
|
||||
|
||||
Map<String, String> notes = KeycloakMarshallUtil.readMap(input, KeycloakMarshallUtil.STRING_EXT, KeycloakMarshallUtil.STRING_EXT,
|
||||
new KeycloakMarshallUtil.ConcurrentHashMapBuilder<>());
|
||||
sessionEntity.setNotes(notes);
|
||||
|
||||
Set<String> protocolMappers = KeycloakMarshallUtil.readCollection(input, KeycloakMarshallUtil.STRING_EXT, new KeycloakMarshallUtil.HashSetBuilder<>());
|
||||
sessionEntity.setProtocolMappers(protocolMappers);
|
||||
|
||||
Set<String> roles = KeycloakMarshallUtil.readCollection(input, KeycloakMarshallUtil.STRING_EXT, new KeycloakMarshallUtil.HashSetBuilder<>());
|
||||
sessionEntity.setRoles(roles);
|
||||
|
||||
return sessionEntity;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -19,6 +19,8 @@ package org.keycloak.models.sessions.infinispan.entities;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
|
@@ -60,4 +62,10 @@ public class SessionEntity implements Serializable {
|
|||
public int hashCode() {
|
||||
return id != null ? id.hashCode() : 0;
|
||||
}
|
||||
|
||||
|
||||
public SessionEntityWrapper mergeRemoteEntityWithLocalEntity(SessionEntityWrapper localEntityWrapper) {
|
||||
throw new IllegalStateException("Not yet implemented");
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -17,18 +17,31 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan.entities;
|
||||
|
||||
import org.infinispan.commons.marshall.Externalizer;
|
||||
import org.infinispan.commons.marshall.MarshallUtil;
|
||||
import org.infinispan.commons.marshall.SerializeWith;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.UserSessionModel;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.util.KeycloakMarshallUtil;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
@SerializeWith(UserSessionEntity.ExternalizerImpl.class)
|
||||
public class UserSessionEntity extends SessionEntity {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(UserSessionEntity.class);
|
||||
|
||||
// Tracks the "lastSessionRefresh" from userSession entity from remote cache
|
||||
public static final String LAST_SESSION_REFRESH_REMOTE = "lsrr";
|
||||
|
||||
private String user;
|
||||
|
||||
private String brokerSessionId;
|
||||
|
@@ -147,4 +160,106 @@ public class UserSessionEntity extends SessionEntity {
|
|||
public void setBrokerUserId(String brokerUserId) {
|
||||
this.brokerUserId = brokerUserId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("UserSessionEntity [ id=%s, realm=%s, lastSessionRefresh=%d]", getId(), getRealm(), getLastSessionRefresh());
|
||||
}
|
||||
|
||||
@Override
|
||||
public SessionEntityWrapper mergeRemoteEntityWithLocalEntity(SessionEntityWrapper localEntityWrapper) {
|
||||
int lsrRemote = getLastSessionRefresh();
|
||||
|
||||
SessionEntityWrapper entityWrapper;
|
||||
if (localEntityWrapper == null) {
|
||||
entityWrapper = new SessionEntityWrapper<>(this);
|
||||
} else {
|
||||
UserSessionEntity localUserSession = (UserSessionEntity) localEntityWrapper.getEntity();
|
||||
|
||||
// The local lastSessionRefresh should always contain the bigger value
|
||||
if (lsrRemote < localUserSession.getLastSessionRefresh()) {
|
||||
setLastSessionRefresh(localUserSession.getLastSessionRefresh());
|
||||
}
|
||||
|
||||
entityWrapper = new SessionEntityWrapper<>(localEntityWrapper.getLocalMetadata(), this);
|
||||
}
|
||||
|
||||
entityWrapper.putLocalMetadataNoteInt(LAST_SESSION_REFRESH_REMOTE, lsrRemote);
|
||||
|
||||
logger.debugf("Updating session entity. lastSessionRefresh=%d, lastSessionRefreshRemote=%d", getLastSessionRefresh(), lsrRemote);
|
||||
|
||||
return entityWrapper;
|
||||
}
|
||||
|
||||
|
||||
public static class ExternalizerImpl implements Externalizer<UserSessionEntity> {
|
||||
|
||||
@Override
|
||||
public void writeObject(ObjectOutput output, UserSessionEntity session) throws IOException {
|
||||
MarshallUtil.marshallString(session.getAuthMethod(), output);
|
||||
MarshallUtil.marshallString(session.getBrokerSessionId(), output);
|
||||
MarshallUtil.marshallString(session.getBrokerUserId(), output);
|
||||
MarshallUtil.marshallString(session.getId(), output);
|
||||
MarshallUtil.marshallString(session.getIpAddress(), output);
|
||||
MarshallUtil.marshallString(session.getLoginUsername(), output);
|
||||
MarshallUtil.marshallString(session.getRealm(), output);
|
||||
MarshallUtil.marshallString(session.getUser(), output);
|
||||
|
||||
MarshallUtil.marshallInt(output, session.getLastSessionRefresh());
|
||||
MarshallUtil.marshallInt(output, session.getStarted());
|
||||
output.writeBoolean(session.isRememberMe());
|
||||
|
||||
int state = session.getState() == null ? 0 :
|
||||
((session.getState() == UserSessionModel.State.LOGGED_IN) ? 1 : (session.getState() == UserSessionModel.State.LOGGED_OUT ? 2 : 3));
|
||||
output.writeInt(state);
|
||||
|
||||
Map<String, String> notes = session.getNotes();
|
||||
KeycloakMarshallUtil.writeMap(notes, KeycloakMarshallUtil.STRING_EXT, KeycloakMarshallUtil.STRING_EXT, output);
|
||||
|
||||
Map<String, AuthenticatedClientSessionEntity> authSessions = session.getAuthenticatedClientSessions();
|
||||
KeycloakMarshallUtil.writeMap(authSessions, KeycloakMarshallUtil.STRING_EXT, new AuthenticatedClientSessionEntity.ExternalizerImpl(), output);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public UserSessionEntity readObject(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
UserSessionEntity sessionEntity = new UserSessionEntity();
|
||||
|
||||
sessionEntity.setAuthMethod(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setBrokerSessionId(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setBrokerUserId(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setId(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setIpAddress(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setLoginUsername(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setRealm(MarshallUtil.unmarshallString(input));
|
||||
sessionEntity.setUser(MarshallUtil.unmarshallString(input));
|
||||
|
||||
sessionEntity.setLastSessionRefresh(MarshallUtil.unmarshallInt(input));
|
||||
sessionEntity.setStarted(MarshallUtil.unmarshallInt(input));
|
||||
sessionEntity.setRememberMe(input.readBoolean());
|
||||
|
||||
int state = input.readInt();
|
||||
switch(state) {
|
||||
case 1: sessionEntity.setState(UserSessionModel.State.LOGGED_IN);
|
||||
break;
|
||||
case 2: sessionEntity.setState(UserSessionModel.State.LOGGED_OUT);
|
||||
break;
|
||||
case 3: sessionEntity.setState(UserSessionModel.State.LOGGING_OUT);
|
||||
break;
|
||||
default:
|
||||
sessionEntity.setState(null);
|
||||
}
|
||||
|
||||
Map<String, String> notes = KeycloakMarshallUtil.readMap(input, KeycloakMarshallUtil.STRING_EXT, KeycloakMarshallUtil.STRING_EXT,
|
||||
new KeycloakMarshallUtil.ConcurrentHashMapBuilder<>());
|
||||
sessionEntity.setNotes(notes);
|
||||
|
||||
Map<String, AuthenticatedClientSessionEntity> authSessions = KeycloakMarshallUtil.readMap(input, KeycloakMarshallUtil.STRING_EXT, new AuthenticatedClientSessionEntity.ExternalizerImpl(),
|
||||
new KeycloakMarshallUtil.ConcurrentHashMapBuilder<>());
|
||||
sessionEntity.setAuthenticatedClientSessions(authSessions);
|
||||
|
||||
return sessionEntity;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
import org.keycloak.cluster.ClusterListener;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.sessions.infinispan.InfinispanAuthenticationSessionProvider;
|
||||
import org.keycloak.models.sessions.infinispan.InfinispanAuthenticationSessionProviderFactory;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
import org.keycloak.sessions.AuthenticationSessionProvider;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class AbstractAuthSessionClusterListener <SE extends SessionClusterEvent> implements ClusterListener {
|
||||
|
||||
private static final Logger log = Logger.getLogger(AbstractAuthSessionClusterListener.class);
|
||||
|
||||
private final KeycloakSessionFactory sessionFactory;
|
||||
|
||||
public AbstractAuthSessionClusterListener(KeycloakSessionFactory sessionFactory) {
|
||||
this.sessionFactory = sessionFactory;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void eventReceived(ClusterEvent event) {
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, (KeycloakSession session) -> {
|
||||
InfinispanAuthenticationSessionProvider provider = (InfinispanAuthenticationSessionProvider) session.getProvider(AuthenticationSessionProvider.class,
|
||||
InfinispanAuthenticationSessionProviderFactory.PROVIDER_ID);
|
||||
SE sessionEvent = (SE) event;
|
||||
|
||||
if (!provider.getCache().getStatus().allowInvocations()) {
|
||||
log.debugf("Cache in state '%s' doesn't allow invocations", provider.getCache().getStatus());
|
||||
return;
|
||||
}
|
||||
|
||||
log.debugf("Received authentication session event '%s'", sessionEvent.toString());
|
||||
|
||||
eventReceived(session, provider, sessionEvent);
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
protected abstract void eventReceived(KeycloakSession session, InfinispanAuthenticationSessionProvider provider, SE sessionEvent);
|
||||
}
|
|
@@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
import org.keycloak.cluster.ClusterListener;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.UserSessionProvider;
|
||||
import org.keycloak.models.sessions.infinispan.InfinispanUserSessionProvider;
|
||||
import org.keycloak.models.sessions.infinispan.InfinispanUserSessionProviderFactory;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class AbstractUserSessionClusterListener<SE extends SessionClusterEvent> implements ClusterListener {
|
||||
|
||||
private static final Logger log = Logger.getLogger(AbstractUserSessionClusterListener.class);
|
||||
|
||||
private final KeycloakSessionFactory sessionFactory;
|
||||
|
||||
public AbstractUserSessionClusterListener(KeycloakSessionFactory sessionFactory) {
|
||||
this.sessionFactory = sessionFactory;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void eventReceived(ClusterEvent event) {
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, (KeycloakSession session) -> {
|
||||
InfinispanUserSessionProvider provider = (InfinispanUserSessionProvider) session.getProvider(UserSessionProvider.class, InfinispanUserSessionProviderFactory.PROVIDER_ID);
|
||||
SE sessionEvent = (SE) event;
|
||||
|
||||
String realmId = sessionEvent.getRealmId();
|
||||
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debugf("Received user session event '%s'", sessionEvent.toString());
|
||||
}
|
||||
|
||||
eventReceived(session, provider, sessionEvent);
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
protected abstract void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider, SE sessionEvent);
|
||||
|
||||
}
|
|
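A hedged sketch of a concrete subclass; the handler body and the registration key are illustrative, only the abstract contract and the constructor come from the class above:

import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.sessions.infinispan.InfinispanUserSessionProvider;

// Sketch of a concrete listener for RealmRemovedSessionEvent (defined later in this changeset).
class RealmRemovedSessionListenerSketch extends AbstractUserSessionClusterListener<RealmRemovedSessionEvent> {

    RealmRemovedSessionListenerSketch(KeycloakSessionFactory sessionFactory) {
        super(sessionFactory);
    }

    @Override
    protected void eventReceived(KeycloakSession session, InfinispanUserSessionProvider provider,
                                 RealmRemovedSessionEvent sessionEvent) {
        // React on this node, e.g. drop local session state for sessionEvent.getRealmId()
    }
}

// Each node would register it once, for example:
//   cluster.registerListener("removeRealmSessions", new RealmRemovedSessionListenerSketch(sessionFactory));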
@@ -0,0 +1,50 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class ClientRemovedSessionEvent implements SessionClusterEvent {
|
||||
|
||||
private String realmId;
|
||||
private String clientUuid;
|
||||
|
||||
public static ClientRemovedSessionEvent create(String realmId, String clientUuid) {
|
||||
ClientRemovedSessionEvent event = new ClientRemovedSessionEvent();
|
||||
event.realmId = realmId;
|
||||
event.clientUuid = clientUuid;
|
||||
return event;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("ClientRemovedSessionEvent [ realmId=%s , clientUuid=%s ]", realmId, clientUuid);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getRealmId() {
|
||||
return realmId;
|
||||
}
|
||||
|
||||
public String getClientUuid() {
|
||||
return clientUuid;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class RealmRemovedSessionEvent implements SessionClusterEvent {
|
||||
|
||||
private String realmId;
|
||||
|
||||
public static RealmRemovedSessionEvent create(String realmId) {
|
||||
RealmRemovedSessionEvent event = new RealmRemovedSessionEvent();
|
||||
event.realmId = realmId;
|
||||
return event;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("RealmRemovedSessionEvent [ realmId=%s ]", realmId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getRealmId() {
|
||||
return realmId;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class RemoveAllUserLoginFailuresEvent implements SessionClusterEvent {
|
||||
|
||||
private String realmId;
|
||||
|
||||
public static RemoveAllUserLoginFailuresEvent create(String realmId) {
|
||||
RemoveAllUserLoginFailuresEvent event = new RemoveAllUserLoginFailuresEvent();
|
||||
event.realmId = realmId;
|
||||
return event;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("RemoveAllUserLoginFailuresEvent [ realmId=%s ]", realmId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getRealmId() {
|
||||
return realmId;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class RemoveUserSessionsEvent implements SessionClusterEvent {
|
||||
|
||||
private String realmId;
|
||||
|
||||
public static RemoveUserSessionsEvent create(String realmId) {
|
||||
RemoveUserSessionsEvent event = new RemoveUserSessionsEvent();
|
||||
event.realmId = realmId;
|
||||
return event;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("RemoveUserSessionsEvent [ realmId=%s ]", realmId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getRealmId() {
|
||||
return realmId;
|
||||
}
|
||||
}
|
|
@@ -15,21 +15,15 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.mapreduce;
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
import org.infinispan.distexec.mapreduce.Reducer;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Iterator;
|
||||
import org.keycloak.cluster.ClusterEvent;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class FirstResultReducer implements Reducer<Object, Object>, Serializable {
|
||||
public interface SessionClusterEvent extends ClusterEvent {
|
||||
|
||||
@Override
|
||||
public Object reduce(Object reducedKey, Iterator<Object> itr) {
|
||||
return itr.next();
|
||||
}
|
||||
String getRealmId();
|
||||
|
||||
}
|
|
@@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.events;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.keycloak.cluster.ClusterProvider;
|
||||
import org.keycloak.common.util.MultivaluedHashMap;
|
||||
import org.keycloak.models.AbstractKeycloakTransaction;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
|
||||
/**
|
||||
* Postpones sending session event notifications until the Keycloak transaction commits
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class SessionEventsSenderTransaction extends AbstractKeycloakTransaction {
|
||||
|
||||
private final KeycloakSession session;
|
||||
|
||||
private final MultivaluedHashMap<String, SessionClusterEvent> sessionEvents = new MultivaluedHashMap<>();
|
||||
private final MultivaluedHashMap<String, SessionClusterEvent> localDCSessionEvents = new MultivaluedHashMap<>();
|
||||
|
||||
public SessionEventsSenderTransaction(KeycloakSession session) {
|
||||
this.session = session;
|
||||
}
|
||||
|
||||
public void addEvent(String eventName, SessionClusterEvent event, boolean sendToAllDCs) {
|
||||
if (sendToAllDCs) {
|
||||
sessionEvents.add(eventName, event);
|
||||
} else {
|
||||
localDCSessionEvents.add(eventName, event);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void commitImpl() {
|
||||
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
|
||||
|
||||
// TODO bulk notify (send whole list instead of separate events?)
|
||||
for (Map.Entry<String, List<SessionClusterEvent>> entry : sessionEvents.entrySet()) {
|
||||
for (SessionClusterEvent event : entry.getValue()) {
|
||||
cluster.notify(entry.getKey(), event, false, ClusterProvider.DCNotify.ALL_DCS);
|
||||
}
|
||||
}
|
||||
|
||||
for (Map.Entry<String, List<SessionClusterEvent>> entry : localDCSessionEvents.entrySet()) {
|
||||
for (SessionClusterEvent event : entry.getValue()) {
|
||||
cluster.notify(entry.getKey(), event, false, ClusterProvider.DCNotify.LOCAL_DC_ONLY);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void rollbackImpl() {
|
||||
|
||||
}
|
||||
}
|
|
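A hedged sketch of typical usage: enlist the transaction once per Keycloak session and queue events as work happens, so they are only broadcast on commit. The enlistment call site and the event key are assumptions, not taken from this diff:

import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;

class SessionEventsSenderUsageSketch {

    // Assumed wiring: the real provider would enlist the transaction when it is created.
    void removeUserSessionsExample(KeycloakSession session, RealmModel realm) {
        SessionEventsSenderTransaction clusterEventsSenderTx = new SessionEventsSenderTransaction(session);
        session.getTransactionManager().enlistAfterCompletion(clusterEventsSenderTx);

        // Queued now, sent to all DCs only when the Keycloak transaction commits (event key is illustrative)
        clusterEventsSenderTx.addEvent("removeUserSessions",
                RemoveUserSessionsEvent.create(realm.getId()), true);
    }
}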
@@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.lifecycle.ComponentStatus;
|
||||
import org.infinispan.remoting.transport.Transport;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.KeycloakSessionTask;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class BaseCacheInitializer extends CacheInitializer {
|
||||
|
||||
private static final String STATE_KEY_PREFIX = "distributed::";
|
||||
|
||||
private static final Logger log = Logger.getLogger(BaseCacheInitializer.class);
|
||||
|
||||
protected final KeycloakSessionFactory sessionFactory;
|
||||
protected final Cache<String, Serializable> workCache;
|
||||
protected final SessionLoader sessionLoader;
|
||||
protected final int sessionsPerSegment;
|
||||
protected final String stateKey;
|
||||
|
||||
public BaseCacheInitializer(KeycloakSessionFactory sessionFactory, Cache<String, Serializable> workCache, SessionLoader sessionLoader, String stateKeySuffix, int sessionsPerSegment) {
|
||||
this.sessionFactory = sessionFactory;
|
||||
this.workCache = workCache;
|
||||
this.sessionLoader = sessionLoader;
|
||||
this.sessionsPerSegment = sessionsPerSegment;
|
||||
this.stateKey = STATE_KEY_PREFIX + stateKeySuffix;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean isFinished() {
|
||||
// Check if we should skip loading sessions. This can happen if someone else already did the task (for example, in a cross-DC environment it was already done by a different DC)
|
||||
boolean isFinishedAlready = this.sessionLoader.isFinished(this);
|
||||
if (isFinishedAlready) {
|
||||
return true;
|
||||
}
|
||||
|
||||
InitializerState state = getStateFromCache();
|
||||
return state != null && state.isFinished();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean isCoordinator() {
|
||||
Transport transport = workCache.getCacheManager().getTransport();
|
||||
return transport == null || transport.isCoordinator();
|
||||
}
|
||||
|
||||
|
||||
protected InitializerState getOrCreateInitializerState() {
|
||||
InitializerState state = getStateFromCache();
|
||||
if (state == null) {
|
||||
final int[] count = new int[1];
|
||||
|
||||
// Rather use separate transactions for update and counting
|
||||
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
|
||||
@Override
|
||||
public void run(KeycloakSession session) {
|
||||
sessionLoader.init(session);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
|
||||
@Override
|
||||
public void run(KeycloakSession session) {
|
||||
count[0] = sessionLoader.getSessionsCount(session);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
state = new InitializerState();
|
||||
state.init(count[0], sessionsPerSegment);
|
||||
saveStateToCache(state);
|
||||
}
|
||||
return state;
|
||||
|
||||
}
|
||||
|
||||
|
||||
private InitializerState getStateFromCache() {
|
||||
// We ignore the cacheStore for now, so that in the cross-DC scenario (with RemoteStore enabled) the remoteStore is ignored. This means that every DC needs to load offline sessions separately.
|
||||
return (InitializerState) workCache.getAdvancedCache()
|
||||
.withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD)
|
||||
.get(stateKey);
|
||||
}
|
||||
|
||||
|
||||
protected void saveStateToCache(final InitializerState state) {
|
||||
|
||||
// 3 attempts to save the state (the put may fail if some node fails in the meantime)
|
||||
retry(3, new Runnable() {
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
// Save this synchronously to ensure all nodes read correct state
|
||||
// We ignore the cacheStore for now, so that in the cross-DC scenario (with RemoteStore enabled) the remoteStore is ignored. This means that every DC needs to load offline sessions separately.
|
||||
BaseCacheInitializer.this.workCache.getAdvancedCache().
|
||||
withFlags(Flag.IGNORE_RETURN_VALUES, Flag.FORCE_SYNCHRONOUS, Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD)
|
||||
.put(stateKey, state);
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
private void retry(int retry, Runnable runnable) {
|
||||
while (true) {
|
||||
try {
|
||||
runnable.run();
|
||||
return;
|
||||
} catch (RuntimeException e) {
|
||||
ComponentStatus status = workCache.getStatus();
|
||||
if (status.isStopping() || status.isTerminated()) {
|
||||
log.warn("Failed to put initializerState to the cache. Cache is already terminating");
|
||||
log.debug(e.getMessage(), e);
|
||||
return;
|
||||
}
|
||||
retry--;
|
||||
if (retry == 0) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public Cache<String, Serializable> getWorkCache() {
|
||||
return workCache;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class CacheInitializer {
|
||||
|
||||
private static final Logger log = Logger.getLogger(CacheInitializer.class);
|
||||
|
||||
public void initCache() {
|
||||
}
|
||||
|
||||
public void loadSessions() {
|
||||
while (!isFinished()) {
|
||||
if (!isCoordinator()) {
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException ie) {
|
||||
log.error("Interrupted", ie);
|
||||
}
|
||||
} else {
|
||||
startLoading();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
protected abstract boolean isFinished();
|
||||
|
||||
protected abstract boolean isCoordinator();
|
||||
|
||||
/**
|
||||
* Only the coordinator will run this
|
||||
*/
|
||||
protected abstract void startLoading();
|
||||
}
|
|
@@ -0,0 +1,81 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.dblock.DBLockManager;
|
||||
import org.keycloak.models.dblock.DBLockProvider;
|
||||
|
||||
/**
|
||||
* Encapsulates preloading of sessions within the DB lock. This DB-aware lock ensures that "startLoading" is done on a single DC while the other DCs wait.
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class DBLockBasedCacheInitializer extends CacheInitializer {
|
||||
|
||||
private static final Logger log = Logger.getLogger(DBLockBasedCacheInitializer.class);
|
||||
|
||||
private final KeycloakSession session;
|
||||
private final CacheInitializer delegate;
|
||||
|
||||
public DBLockBasedCacheInitializer(KeycloakSession session, CacheInitializer delegate) {
|
||||
this.session = session;
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void initCache() {
|
||||
delegate.initCache();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean isFinished() {
|
||||
return delegate.isFinished();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean isCoordinator() {
|
||||
return delegate.isCoordinator();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Only the coordinator will run this. Because of the DB lock, delegate.startLoading() is permitted on a single DC only
|
||||
*/
|
||||
@Override
|
||||
protected void startLoading() {
|
||||
DBLockManager dbLockManager = new DBLockManager(session);
|
||||
dbLockManager.checkForcedUnlock();
|
||||
DBLockProvider dbLock = dbLockManager.getDBLock();
|
||||
dbLock.waitForLock();
|
||||
try {
|
||||
|
||||
if (isFinished()) {
|
||||
log.infof("Task already finished when DBLock retrieved");
|
||||
} else {
|
||||
delegate.startLoading();
|
||||
}
|
||||
} finally {
|
||||
dbLock.releaseLock();
|
||||
}
|
||||
}
|
||||
}
|
|
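A hedged sketch of how the decorator might be composed with the Infinispan-based initializer introduced later in this changeset; the state-key suffix and the numeric arguments are made-up illustrations, only the two constructors and the public initCache/loadSessions calls come from these classes:

import java.io.Serializable;
import org.infinispan.Cache;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;

class CacheInitializerWiringSketch {

    // Assumed call site, e.g. offline-session preloading at startup.
    void preloadOfflineSessions(KeycloakSession session, KeycloakSessionFactory sessionFactory,
                                Cache<String, Serializable> workCache) {
        CacheInitializer initializer = new InfinispanCacheInitializer(sessionFactory, workCache,
                new OfflinePersistentUserSessionLoader(), "offlineUserSessions", 64, 20);

        // Gate the actual loading behind the DB lock so that only one DC does the work
        initializer = new DBLockBasedCacheInitializer(session, initializer);

        initializer.initCache();
        initializer.loadSessions();
    }
}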
@@ -18,15 +18,10 @@
|
|||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.distexec.DefaultExecutorService;
|
||||
import org.infinispan.lifecycle.ComponentStatus;
|
||||
import org.infinispan.remoting.transport.Transport;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.KeycloakSessionFactory;
|
||||
import org.keycloak.models.KeycloakSessionTask;
|
||||
import org.keycloak.models.utils.KeycloakModelUtils;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.LinkedList;
|
||||
|
@@ -37,131 +32,34 @@ import java.util.concurrent.Executors;
|
|||
import java.util.concurrent.Future;
|
||||
|
||||
/**
|
||||
* Startup initialization for reading persistent userSessions/clientSessions to be filled into infinispan/memory . In cluster,
|
||||
* Startup initialization for reading persistent userSessions to be filled into Infinispan/memory. In a cluster,
|
||||
* the initialization is distributed among all cluster nodes, so the startup time is even faster
|
||||
*
|
||||
* TODO: Move to clusterService. Implementation is already pretty generic and doesn't contain any "userSession" specific stuff. All sessions-specific logic is in the SessionLoader implementation
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class InfinispanUserSessionInitializer {
|
||||
public class InfinispanCacheInitializer extends BaseCacheInitializer {
|
||||
|
||||
private static final String STATE_KEY_PREFIX = "distributed::";
|
||||
private static final Logger log = Logger.getLogger(InfinispanCacheInitializer.class);
|
||||
|
||||
private static final Logger log = Logger.getLogger(InfinispanUserSessionInitializer.class);
|
||||
|
||||
private final KeycloakSessionFactory sessionFactory;
|
||||
private final Cache<String, Serializable> workCache;
|
||||
private final SessionLoader sessionLoader;
|
||||
private final int maxErrors;
|
||||
private final int sessionsPerSegment;
|
||||
private final String stateKey;
|
||||
|
||||
|
||||
public InfinispanUserSessionInitializer(KeycloakSessionFactory sessionFactory, Cache<String, Serializable> workCache, SessionLoader sessionLoader, int maxErrors, int sessionsPerSegment, String stateKeySuffix) {
|
||||
this.sessionFactory = sessionFactory;
|
||||
this.workCache = workCache;
|
||||
this.sessionLoader = sessionLoader;
|
||||
public InfinispanCacheInitializer(KeycloakSessionFactory sessionFactory, Cache<String, Serializable> workCache, SessionLoader sessionLoader, String stateKeySuffix, int sessionsPerSegment, int maxErrors) {
|
||||
super(sessionFactory, workCache, sessionLoader, stateKeySuffix, sessionsPerSegment);
|
||||
this.maxErrors = maxErrors;
|
||||
this.sessionsPerSegment = sessionsPerSegment;
|
||||
this.stateKey = STATE_KEY_PREFIX + stateKeySuffix;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initCache() {
|
||||
this.workCache.getAdvancedCache().getComponentRegistry().registerComponent(sessionFactory, KeycloakSessionFactory.class);
|
||||
}
|
||||
|
||||
|
||||
public void loadPersistentSessions() {
|
||||
if (isFinished()) {
|
||||
return;
|
||||
}
|
||||
|
||||
while (!isFinished()) {
|
||||
if (!isCoordinator()) {
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException ie) {
|
||||
log.error("Interrupted", ie);
|
||||
}
|
||||
} else {
|
||||
startLoading();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private boolean isFinished() {
|
||||
InitializerState state = getStateFromCache();
|
||||
return state != null && state.isFinished();
|
||||
}
|
||||
|
||||
|
||||
private InitializerState getOrCreateInitializerState() {
|
||||
InitializerState state = getStateFromCache();
|
||||
if (state == null) {
|
||||
final int[] count = new int[1];
|
||||
|
||||
// Rather use separate transactions for update and counting
|
||||
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
|
||||
@Override
|
||||
public void run(KeycloakSession session) {
|
||||
sessionLoader.init(session);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
|
||||
@Override
|
||||
public void run(KeycloakSession session) {
|
||||
count[0] = sessionLoader.getSessionsCount(session);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
state = new InitializerState();
|
||||
state.init(count[0], sessionsPerSegment);
|
||||
saveStateToCache(state);
|
||||
}
|
||||
return state;
|
||||
|
||||
}
|
||||
|
||||
private InitializerState getStateFromCache() {
|
||||
// TODO: We ignore cacheStore for now, so that in Cross-DC scenario (with RemoteStore enabled) is the remoteStore ignored. This means that every DC needs to load offline sessions separately.
|
||||
return (InitializerState) workCache.getAdvancedCache()
|
||||
.withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD)
|
||||
.get(stateKey);
|
||||
}
|
||||
|
||||
private void saveStateToCache(final InitializerState state) {
|
||||
|
||||
// 3 attempts to send the message (it may fail if some node fails in the meantime)
|
||||
retry(3, new Runnable() {
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
// Save this synchronously to ensure all nodes read correct state
|
||||
// TODO: We ignore cacheStore for now, so that in Cross-DC scenario (with RemoteStore enabled) is the remoteStore ignored. This means that every DC needs to load offline sessions separately.
|
||||
InfinispanUserSessionInitializer.this.workCache.getAdvancedCache().
|
||||
withFlags(Flag.IGNORE_RETURN_VALUES, Flag.FORCE_SYNCHRONOUS, Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD)
|
||||
.put(stateKey, state);
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
private boolean isCoordinator() {
|
||||
Transport transport = workCache.getCacheManager().getTransport();
|
||||
return transport == null || transport.isCoordinator();
|
||||
}
|
||||
|
||||
|
||||
// Just coordinator will run this
|
||||
private void startLoading() {
|
||||
@Override
|
||||
protected void startLoading() {
|
||||
InitializerState state = getOrCreateInitializerState();
|
||||
|
||||
// Assume each worker has the same processor count
|
||||
|
@@ -230,6 +128,10 @@ public class InfinispanUserSessionInitializer {
|
|||
log.debug("New initializer state pushed. The state is: " + state.printState());
|
||||
}
|
||||
}
|
||||
|
||||
// Loader callback after the task is finished
|
||||
this.sessionLoader.afterAllSessionsLoaded(this);
|
||||
|
||||
} finally {
|
||||
if (distributed) {
|
||||
executorService.shutdown();
|
||||
|
@@ -238,25 +140,6 @@ public class InfinispanUserSessionInitializer {
|
|||
}
|
||||
}
|
||||
|
||||
private void retry(int retry, Runnable runnable) {
|
||||
while (true) {
|
||||
try {
|
||||
runnable.run();
|
||||
return;
|
||||
} catch (RuntimeException e) {
|
||||
ComponentStatus status = workCache.getStatus();
|
||||
if (status.isStopping() || status.isTerminated()) {
|
||||
log.warn("Failed to put initializerState to the cache. Cache is already terminating");
|
||||
log.debug(e.getMessage(), e);
|
||||
return;
|
||||
}
|
||||
retry--;
|
||||
if (retry == 0) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static class WorkerResult implements Serializable {
|
||||
|
|
@ -17,20 +17,30 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.cluster.ClusterProvider;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.UserSessionModel;
|
||||
import org.keycloak.models.session.UserSessionPersisterProvider;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class OfflineUserSessionLoader implements SessionLoader {
|
||||
public class OfflinePersistentUserSessionLoader implements SessionLoader, Serializable {
|
||||
|
||||
private static final Logger log = Logger.getLogger(OfflinePersistentUserSessionLoader.class);
|
||||
|
||||
// Cross-DC aware flag
|
||||
public static final String PERSISTENT_SESSIONS_LOADED = "PERSISTENT_SESSIONS_LOADED";
|
||||
|
||||
// Just local-DC aware flag
|
||||
public static final String PERSISTENT_SESSIONS_LOADED_IN_CURRENT_DC = "PERSISTENT_SESSIONS_LOADED_IN_CURRENT_DC";
|
||||
|
||||
private static final Logger log = Logger.getLogger(OfflineUserSessionLoader.class);
|
||||
|
||||
@Override
|
||||
public void init(KeycloakSession session) {
|
||||
|
@ -45,12 +55,14 @@ public class OfflineUserSessionLoader implements SessionLoader {
|
|||
persister.updateAllTimestamps(clusterStartupTime);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int getSessionsCount(KeycloakSession session) {
|
||||
UserSessionPersisterProvider persister = session.getProvider(UserSessionPersisterProvider.class);
|
||||
return persister.getUserSessionsCount(true);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean loadSessions(KeycloakSession session, int first, int max) {
|
||||
if (log.isTraceEnabled()) {
|
||||
|
@ -70,4 +82,37 @@ public class OfflineUserSessionLoader implements SessionLoader {
|
|||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean isFinished(BaseCacheInitializer initializer) {
|
||||
Cache<String, Serializable> workCache = initializer.getWorkCache();
|
||||
Boolean sessionsLoaded = (Boolean) workCache.get(PERSISTENT_SESSIONS_LOADED);
|
||||
|
||||
if (sessionsLoaded != null && sessionsLoaded) {
|
||||
log.debugf("Persistent sessions loaded already.");
|
||||
return true;
|
||||
} else {
|
||||
log.debugf("Persistent sessions not yet loaded.");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void afterAllSessionsLoaded(BaseCacheInitializer initializer) {
|
||||
Cache<String, Serializable> workCache = initializer.getWorkCache();
|
||||
|
||||
// Cross-DC aware flag
|
||||
workCache
|
||||
.getAdvancedCache().withFlags(Flag.SKIP_REMOTE_LOOKUP)
|
||||
.put(PERSISTENT_SESSIONS_LOADED, true);
|
||||
|
||||
// Just local-DC aware flag
|
||||
workCache
|
||||
.getAdvancedCache().withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE)
|
||||
.put(PERSISTENT_SESSIONS_LOADED_IN_CURRENT_DC, true);
|
||||
|
||||
|
||||
log.debugf("Persistent sessions loaded successfully!");
|
||||
}
|
||||
|
||||
}
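For orientation, a minimal sketch of how a caller might consult the two flags above; it is illustrative only, and the InfinispanConnectionProvider lookup plus the "work" cache name are assumptions rather than code from this changeset.

// Illustrative sketch only: reading the cross-DC and local-DC flags set by OfflinePersistentUserSessionLoader.
// The provider lookup and the "work" cache name are assumptions.
import java.io.Serializable;

import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.initializer.OfflinePersistentUserSessionLoader;

class PersistentSessionsFlagCheck {

    static boolean alreadyLoaded(KeycloakSession session) {
        Cache<String, Serializable> workCache =
                session.getProvider(InfinispanConnectionProvider.class).getCache("work"); // cache name assumed

        // Cross-DC aware flag: true once any DC has loaded the persistent sessions
        Boolean loadedAnywhere = (Boolean) workCache.get(OfflinePersistentUserSessionLoader.PERSISTENT_SESSIONS_LOADED);

        // Local-DC flag: read without touching the remote store, mirroring how the loader writes it
        Boolean loadedHere = (Boolean) workCache.getAdvancedCache()
                .withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE)
                .get(OfflinePersistentUserSessionLoader.PERSISTENT_SESSIONS_LOADED_IN_CURRENT_DC);

        return Boolean.TRUE.equals(loadedAnywhere) || Boolean.TRUE.equals(loadedHere);
    }
}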
|
|
@ -31,7 +31,7 @@ import java.util.Set;
|
|||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class SessionInitializerWorker implements DistributedCallable<String, Serializable, InfinispanUserSessionInitializer.WorkerResult>, Serializable {
|
||||
public class SessionInitializerWorker implements DistributedCallable<String, Serializable, InfinispanCacheInitializer.WorkerResult>, Serializable {
|
||||
|
||||
private static final Logger log = Logger.getLogger(SessionInitializerWorker.class);
|
||||
|
||||
|
@ -53,7 +53,7 @@ public class SessionInitializerWorker implements DistributedCallable<String, Ser
|
|||
}
|
||||
|
||||
@Override
|
||||
public InfinispanUserSessionInitializer.WorkerResult call() throws Exception {
|
||||
public InfinispanCacheInitializer.WorkerResult call() throws Exception {
|
||||
if (log.isTraceEnabled()) {
|
||||
log.tracef("Running computation for segment: %d", segment);
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ public class SessionInitializerWorker implements DistributedCallable<String, Ser
|
|||
KeycloakSessionFactory sessionFactory = workCache.getAdvancedCache().getComponentRegistry().getComponent(KeycloakSessionFactory.class);
|
||||
if (sessionFactory == null) {
|
||||
log.debugf("KeycloakSessionFactory not yet set in cache. Worker skipped");
|
||||
return InfinispanUserSessionInitializer.WorkerResult.create(segment, false);
|
||||
return InfinispanCacheInitializer.WorkerResult.create(segment, false);
|
||||
}
|
||||
|
||||
final int first = segment * sessionsPerSegment;
|
||||
|
@ -76,7 +76,7 @@ public class SessionInitializerWorker implements DistributedCallable<String, Ser
|
|||
|
||||
});
|
||||
|
||||
return InfinispanUserSessionInitializer.WorkerResult.create(segment, true);
|
||||
return InfinispanCacheInitializer.WorkerResult.create(segment, true);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -24,11 +24,51 @@ import java.io.Serializable;
|
|||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public interface SessionLoader extends Serializable {
|
||||
public interface SessionLoader {
|
||||
|
||||
/**
|
||||
* Will be triggered just once, on the cluster coordinator node, to perform generic initialization tasks (e.g. update the DB before loading starts).
|
||||
*
|
||||
* NOTE: This shouldn't be used for initialization of the loader instance itself!
|
||||
*
|
||||
* @param session
|
||||
*/
|
||||
void init(KeycloakSession session);
|
||||
|
||||
|
||||
/**
|
||||
* Will be triggered just once, on the cluster coordinator node, to count the number of sessions.
|
||||
*
|
||||
* @param session
|
||||
* @return
|
||||
*/
|
||||
int getSessionsCount(KeycloakSession session);
|
||||
|
||||
|
||||
/**
|
||||
* Will be called on all cluster nodes to load the specified page.
|
||||
*
|
||||
* @param session
|
||||
* @param first
|
||||
* @param max
|
||||
* @return
|
||||
*/
|
||||
boolean loadSessions(KeycloakSession session, int first, int max);
|
||||
|
||||
|
||||
/**
|
||||
* Will be called on nodes to check whether loading is finished. It allows the loader to signal that loading is already finished for some reason.
|
||||
*
|
||||
* @param initializer
|
||||
* @return
|
||||
*/
|
||||
boolean isFinished(BaseCacheInitializer initializer);
|
||||
|
||||
|
||||
/**
|
||||
* Callback triggered on the cluster coordinator once it recognizes that all sessions were successfully loaded.
|
||||
*
|
||||
* @param initializer
|
||||
*/
|
||||
void afterAllSessionsLoaded(BaseCacheInitializer initializer);
|
||||
}
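To make the contract above concrete, here is a hedged sketch of a trivial no-op implementation; the class name and method bodies are purely illustrative and not part of this changeset.

// Illustrative sketch only: a no-op SessionLoader honoring the lifecycle documented above.
// Assumed to live next to SessionLoader in org.keycloak.models.sessions.infinispan.initializer.
import org.keycloak.models.KeycloakSession;

public class NoOpSessionLoader implements SessionLoader {

    @Override
    public void init(KeycloakSession session) {
        // One-time preparation on the coordinator (e.g. a DB update) would go here.
    }

    @Override
    public int getSessionsCount(KeycloakSession session) {
        return 0; // nothing to load
    }

    @Override
    public boolean loadSessions(KeycloakSession session, int first, int max) {
        return true; // page [first, first + max) "loaded" successfully
    }

    @Override
    public boolean isFinished(BaseCacheInitializer initializer) {
        return true; // signal that no further loading is needed
    }

    @Override
    public void afterAllSessionsLoaded(BaseCacheInitializer initializer) {
        // A real loader would mark completion flags in the work cache here.
    }
}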
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
|
||||
/**
|
||||
* This impl is able to run a non-paginatable loader task and hence will be executed on just a single node.
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class SingleWorkerCacheInitializer extends BaseCacheInitializer {
|
||||
|
||||
private final KeycloakSession session;
|
||||
|
||||
public SingleWorkerCacheInitializer(KeycloakSession session, Cache<String, Serializable> workCache, SessionLoader sessionLoader, String stateKeySuffix) {
|
||||
super(session.getKeycloakSessionFactory(), workCache, sessionLoader, stateKeySuffix, Integer.MAX_VALUE);
|
||||
this.session = session;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void startLoading() {
|
||||
InitializerState state = getOrCreateInitializerState();
|
||||
while (!state.isFinished()) {
|
||||
sessionLoader.loadSessions(session, -1, -1);
|
||||
state.markSegmentFinished(0);
|
||||
saveStateToCache(state);
|
||||
}
|
||||
|
||||
// Loader callback after the task is finished
|
||||
this.sessionLoader.afterAllSessionsLoaded(this);
|
||||
}
|
||||
}
|
|
@ -1,68 +0,0 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.mapreduce;
|
||||
|
||||
import org.infinispan.distexec.mapreduce.Collector;
|
||||
import org.infinispan.distexec.mapreduce.Mapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
public class SessionMapper implements Mapper<String, SessionEntity, String, Object>, Serializable {
|
||||
|
||||
public SessionMapper(String realm) {
|
||||
this.realm = realm;
|
||||
}
|
||||
|
||||
private enum EmitValue {
|
||||
KEY, ENTITY
|
||||
}
|
||||
|
||||
private String realm;
|
||||
|
||||
private EmitValue emit = EmitValue.ENTITY;
|
||||
|
||||
public static SessionMapper create(String realm) {
|
||||
return new SessionMapper(realm);
|
||||
}
|
||||
|
||||
public SessionMapper emitKey() {
|
||||
emit = EmitValue.KEY;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void map(String key, SessionEntity e, Collector collector) {
|
||||
if (!realm.equals(e.getRealm())) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (emit) {
|
||||
case KEY:
|
||||
collector.emit(key, key);
|
||||
break;
|
||||
case ENTITY:
|
||||
collector.emit(key, e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,69 +0,0 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.mapreduce;
|
||||
|
||||
import org.infinispan.distexec.mapreduce.Collector;
|
||||
import org.infinispan.distexec.mapreduce.Mapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
public class UserLoginFailureMapper implements Mapper<LoginFailureKey, LoginFailureEntity, LoginFailureKey, Object>, Serializable {
|
||||
|
||||
public UserLoginFailureMapper(String realm) {
|
||||
this.realm = realm;
|
||||
}
|
||||
|
||||
private enum EmitValue {
|
||||
KEY, ENTITY
|
||||
}
|
||||
|
||||
private String realm;
|
||||
|
||||
private EmitValue emit = EmitValue.ENTITY;
|
||||
|
||||
public static UserLoginFailureMapper create(String realm) {
|
||||
return new UserLoginFailureMapper(realm);
|
||||
}
|
||||
|
||||
public UserLoginFailureMapper emitKey() {
|
||||
emit = EmitValue.KEY;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void map(LoginFailureKey key, LoginFailureEntity e, Collector collector) {
|
||||
if (!realm.equals(e.getRealm())) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (emit) {
|
||||
case KEY:
|
||||
collector.emit(key, key);
|
||||
break;
|
||||
case ENTITY:
|
||||
collector.emit(key, e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,120 +0,0 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.mapreduce;
|
||||
|
||||
import org.infinispan.distexec.mapreduce.Collector;
|
||||
import org.infinispan.distexec.mapreduce.Mapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
public class UserSessionMapper implements Mapper<String, SessionEntity, String, Object>, Serializable {
|
||||
|
||||
public UserSessionMapper(String realm) {
|
||||
this.realm = realm;
|
||||
}
|
||||
|
||||
private enum EmitValue {
|
||||
KEY, ENTITY
|
||||
}
|
||||
|
||||
private String realm;
|
||||
|
||||
private EmitValue emit = EmitValue.ENTITY;
|
||||
|
||||
private String user;
|
||||
|
||||
private Integer expired;
|
||||
|
||||
private Integer expiredRefresh;
|
||||
|
||||
private String brokerSessionId;
|
||||
private String brokerUserId;
|
||||
|
||||
public static UserSessionMapper create(String realm) {
|
||||
return new UserSessionMapper(realm);
|
||||
}
|
||||
|
||||
public UserSessionMapper emitKey() {
|
||||
emit = EmitValue.KEY;
|
||||
return this;
|
||||
}
|
||||
|
||||
public UserSessionMapper user(String user) {
|
||||
this.user = user;
|
||||
return this;
|
||||
}
|
||||
|
||||
public UserSessionMapper expired(Integer expired, Integer expiredRefresh) {
|
||||
this.expired = expired;
|
||||
this.expiredRefresh = expiredRefresh;
|
||||
return this;
|
||||
}
|
||||
|
||||
public UserSessionMapper brokerSessionId(String id) {
|
||||
this.brokerSessionId = id;
|
||||
return this;
|
||||
}
|
||||
|
||||
public UserSessionMapper brokerUserId(String id) {
|
||||
this.brokerUserId = id;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void map(String key, SessionEntity e, Collector collector) {
|
||||
if (!(e instanceof UserSessionEntity)) {
|
||||
return;
|
||||
}
|
||||
|
||||
UserSessionEntity entity = (UserSessionEntity) e;
|
||||
|
||||
if (!realm.equals(entity.getRealm())) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (user != null && !entity.getUser().equals(user)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (brokerSessionId != null && !brokerSessionId.equals(entity.getBrokerSessionId())) return;
|
||||
if (brokerUserId != null && !brokerUserId.equals(entity.getBrokerUserId())) return;
|
||||
|
||||
if (expired != null && expiredRefresh != null && entity.getStarted() > expired && entity.getLastSessionRefresh() > expiredRefresh) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (expired == null && expiredRefresh != null && entity.getLastSessionRefresh() > expiredRefresh) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (emit) {
|
||||
case KEY:
|
||||
collector.emit(key, key);
|
||||
break;
|
||||
case ENTITY:
|
||||
collector.emit(key, entity);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.mapreduce;
|
||||
|
||||
import org.infinispan.distexec.mapreduce.Collector;
|
||||
import org.infinispan.distexec.mapreduce.Mapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
public class UserSessionNoteMapper implements Mapper<String, SessionEntity, String, Object>, Serializable {
|
||||
|
||||
public UserSessionNoteMapper(String realm) {
|
||||
this.realm = realm;
|
||||
}
|
||||
|
||||
private enum EmitValue {
|
||||
KEY, ENTITY
|
||||
}
|
||||
|
||||
private String realm;
|
||||
|
||||
private EmitValue emit = EmitValue.ENTITY;
|
||||
private Map<String, String> notes;
|
||||
|
||||
public static UserSessionNoteMapper create(String realm) {
|
||||
return new UserSessionNoteMapper(realm);
|
||||
}
|
||||
|
||||
public UserSessionNoteMapper emitKey() {
|
||||
emit = EmitValue.KEY;
|
||||
return this;
|
||||
}
|
||||
|
||||
public UserSessionNoteMapper notes(Map<String, String> notes) {
|
||||
this.notes = notes;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void map(String key, SessionEntity e, Collector collector) {
|
||||
if (!(e instanceof UserSessionEntity)) {
|
||||
return;
|
||||
}
|
||||
|
||||
UserSessionEntity entity = (UserSessionEntity) e;
|
||||
|
||||
if (!realm.equals(entity.getRealm())) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (Map.Entry<String, String> entry : notes.entrySet()) {
|
||||
String note = entity.getNotes().get(entry.getKey());
|
||||
if (note == null) return;
|
||||
if (!note.equals(entry.getValue())) return;
|
||||
}
|
||||
|
||||
switch (emit) {
|
||||
case KEY:
|
||||
collector.emit(key, key);
|
||||
break;
|
||||
case ENTITY:
|
||||
collector.emit(key, entity);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.remotestore;
|
||||
|
||||
import java.util.concurrent.Executor;
|
||||
|
||||
import org.infinispan.commons.configuration.ConfiguredBy;
|
||||
import org.infinispan.filter.KeyFilter;
|
||||
import org.infinispan.marshall.core.MarshalledEntry;
|
||||
import org.infinispan.metadata.InternalMetadata;
|
||||
import org.infinispan.persistence.remote.RemoteStore;
|
||||
import org.infinispan.persistence.spi.PersistenceException;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@ConfiguredBy(KcRemoteStoreConfiguration.class)
|
||||
public class KcRemoteStore extends RemoteStore {
|
||||
|
||||
protected static final Logger logger = Logger.getLogger(KcRemoteStore.class);
|
||||
|
||||
private String cacheName;
|
||||
|
||||
@Override
|
||||
public void start() throws PersistenceException {
|
||||
super.start();
|
||||
if (getRemoteCache() == null) {
|
||||
String cacheName = getConfiguration().remoteCacheName();
|
||||
throw new IllegalStateException("Remote cache '" + cacheName + "' is not available.");
|
||||
}
|
||||
this.cacheName = getRemoteCache().getName();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MarshalledEntry load(Object key) throws PersistenceException {
|
||||
logger.debugf("Calling load: '%s' for remote cache '%s'", key, cacheName);
|
||||
|
||||
MarshalledEntry entry = super.load(key);
|
||||
if (entry == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// wrap remote entity
|
||||
SessionEntity entity = (SessionEntity) entry.getValue();
|
||||
SessionEntityWrapper entityWrapper = new SessionEntityWrapper(entity);
|
||||
|
||||
MarshalledEntry wrappedEntry = marshalledEntry(entry.getKey(), entityWrapper, entry.getMetadata());
|
||||
|
||||
logger.debugf("Found entry in load: %s", wrappedEntry.toString());
|
||||
|
||||
return wrappedEntry;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void process(KeyFilter filter, CacheLoaderTask task, Executor executor, boolean fetchValue, boolean fetchMetadata) {
|
||||
logger.infof("Calling process with filter '%s' on cache '%s'", filter, cacheName);
|
||||
super.process(filter, task, executor, fetchValue, fetchMetadata);
|
||||
}
|
||||
|
||||
|
||||
// Don't do anything. Writes are handled by KC itself, as we need more flexibility
|
||||
@Override
|
||||
public void write(MarshalledEntry entry) throws PersistenceException {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean delete(Object key) throws PersistenceException {
|
||||
logger.debugf("Calling delete for key '%s' on cache '%s'", key, cacheName);
|
||||
|
||||
// Optimization - we don't need to know the previous value. Also it's ok to trigger asynchronously
|
||||
getRemoteCache().removeAsync(key);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
protected MarshalledEntry marshalledEntry(Object key, Object value, InternalMetadata metadata) {
|
||||
return ctx.getMarshalledEntryFactory().newMarshalledEntry(key, value, metadata);
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.remotestore;
|
||||
|
||||
import org.infinispan.commons.configuration.BuiltBy;
|
||||
import org.infinispan.commons.configuration.ConfigurationFor;
|
||||
import org.infinispan.commons.configuration.attributes.AttributeSet;
|
||||
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
|
||||
import org.infinispan.configuration.cache.SingletonStoreConfiguration;
|
||||
import org.infinispan.persistence.remote.configuration.ConnectionPoolConfiguration;
|
||||
import org.infinispan.persistence.remote.configuration.ExecutorFactoryConfiguration;
|
||||
import org.infinispan.persistence.remote.configuration.RemoteStoreConfiguration;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@BuiltBy(KcRemoteStoreConfigurationBuilder.class)
|
||||
@ConfigurationFor(KcRemoteStore.class)
|
||||
public class KcRemoteStoreConfiguration extends RemoteStoreConfiguration {
|
||||
|
||||
public KcRemoteStoreConfiguration(AttributeSet attributes, AsyncStoreConfiguration async, SingletonStoreConfiguration singletonStore,
|
||||
ExecutorFactoryConfiguration asyncExecutorFactory, ConnectionPoolConfiguration connectionPool) {
|
||||
super(attributes, async, singletonStore, asyncExecutorFactory, connectionPool);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.remotestore;
|
||||
|
||||
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
|
||||
import org.infinispan.persistence.remote.configuration.RemoteStoreConfiguration;
|
||||
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class KcRemoteStoreConfigurationBuilder extends RemoteStoreConfigurationBuilder {
|
||||
|
||||
public KcRemoteStoreConfigurationBuilder(PersistenceConfigurationBuilder builder) {
|
||||
super(builder);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KcRemoteStoreConfiguration create() {
|
||||
RemoteStoreConfiguration cfg = super.create();
|
||||
KcRemoteStoreConfiguration cfg2 = new KcRemoteStoreConfiguration(cfg.attributes(), cfg.async(), cfg.singletonStore(), cfg.asyncExecutorFactory(), cfg.connectionPool());
|
||||
return cfg2;
|
||||
}
|
||||
}
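The annotations above (@ConfiguredBy on the store, @ConfigurationFor plus @BuiltBy on the configuration) tie the custom store into Infinispan's persistence SPI. A hedged wiring sketch follows; the host, port and remote cache name are placeholders, not values from this changeset.

// Illustrative sketch only: attaching KcRemoteStore to a cache configuration via its builder.
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.keycloak.models.sessions.infinispan.remotestore.KcRemoteStoreConfigurationBuilder;

class KcRemoteStoreWiringSketch {

    static Configuration sessionCacheWithRemoteStore() {
        ConfigurationBuilder builder = new ConfigurationBuilder();
        KcRemoteStoreConfigurationBuilder store = builder.persistence()
                .addStore(KcRemoteStoreConfigurationBuilder.class);    // resolves to KcRemoteStore via @ConfigurationFor
        store.remoteCacheName("sessions");                             // placeholder: cache name on the external JDG
        store.addServer().host("jdg.example.com").port(11222);         // placeholder endpoint
        return builder.build();
    }
}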
|
|
@ -0,0 +1,163 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.remotestore;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.infinispan.client.hotrod.Flag;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.client.hotrod.VersionedValue;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.RealmModel;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class RemoteCacheInvoker {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(RemoteCacheInvoker.class);
|
||||
|
||||
private final Map<String, RemoteCacheContext> remoteCaches = new HashMap<>();
|
||||
|
||||
|
||||
public void addRemoteCache(String cacheName, RemoteCache remoteCache, MaxIdleTimeLoader maxIdleLoader) {
|
||||
RemoteCacheContext ctx = new RemoteCacheContext(remoteCache, maxIdleLoader);
|
||||
remoteCaches.put(cacheName, ctx);
|
||||
}
|
||||
|
||||
public Set<String> getRemoteCacheNames() {
|
||||
return Collections.unmodifiableSet(remoteCaches.keySet());
|
||||
}
|
||||
|
||||
|
||||
public <S extends SessionEntity> void runTask(KeycloakSession kcSession, RealmModel realm, String cacheName, String key, SessionUpdateTask<S> task, SessionEntityWrapper<S> sessionWrapper) {
|
||||
RemoteCacheContext context = remoteCaches.get(cacheName);
|
||||
if (context == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
S session = sessionWrapper.getEntity();
|
||||
|
||||
SessionUpdateTask.CacheOperation operation = task.getOperation(session);
|
||||
SessionUpdateTask.CrossDCMessageStatus status = task.getCrossDCMessageStatus(sessionWrapper);
|
||||
|
||||
if (status == SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED) {
|
||||
logger.debugf("Skip writing to remoteCache for entity '%s' of cache '%s' and operation '%s'", key, cacheName, operation.toString());
|
||||
return;
|
||||
}
|
||||
|
||||
long maxIdleTimeMs = context.maxIdleTimeLoader.getMaxIdleTimeMs(realm);
|
||||
|
||||
// Double the timeout to ensure that the entry won't expire on the remoteCache in case the write of some entities to the remoteCache is postponed (e.g. userSession.lastSessionRefresh)
|
||||
maxIdleTimeMs = maxIdleTimeMs * 2;
|
||||
|
||||
logger.debugf("Running task '%s' on remote cache '%s' . Key is '%s'", operation, cacheName, key);
|
||||
|
||||
runOnRemoteCache(context.remoteCache, maxIdleTimeMs, key, task, session);
|
||||
}
|
||||
|
||||
|
||||
private <S extends SessionEntity> void runOnRemoteCache(RemoteCache remoteCache, long maxIdleMs, String key, SessionUpdateTask<S> task, S session) {
|
||||
SessionUpdateTask.CacheOperation operation = task.getOperation(session);
|
||||
|
||||
switch (operation) {
|
||||
case REMOVE:
|
||||
// REMOVE already handled at remote cache store level
|
||||
//remoteCache.remove(key);
|
||||
break;
|
||||
case ADD:
|
||||
remoteCache.put(key, session, task.getLifespanMs(), TimeUnit.MILLISECONDS, maxIdleMs, TimeUnit.MILLISECONDS);
|
||||
break;
|
||||
case ADD_IF_ABSENT:
|
||||
SessionEntity existing = (SessionEntity) remoteCache
|
||||
.withFlags(Flag.FORCE_RETURN_VALUE)
|
||||
.putIfAbsent(key, session, -1, TimeUnit.MILLISECONDS, maxIdleMs, TimeUnit.MILLISECONDS);
|
||||
if (existing != null) {
|
||||
throw new IllegalStateException("There is already existing value in cache for key " + key);
|
||||
}
|
||||
break;
|
||||
case REPLACE:
|
||||
replace(remoteCache, task.getLifespanMs(), maxIdleMs, key, task);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("Unsupported state " + operation);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private <S extends SessionEntity> void replace(RemoteCache remoteCache, long lifespanMs, long maxIdleMs, String key, SessionUpdateTask<S> task) {
|
||||
boolean replaced = false;
|
||||
while (!replaced) {
|
||||
VersionedValue<S> versioned = remoteCache.getVersioned(key);
|
||||
if (versioned == null) {
|
||||
logger.warnf("Not found entity to replace for key '%s'", key);
|
||||
return;
|
||||
}
|
||||
|
||||
S session = versioned.getValue();
|
||||
|
||||
// Run task on the remote session
|
||||
task.runUpdate(session);
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debugf("Before replaceWithVersion. Written entity: %s", session.toString());
|
||||
}
|
||||
|
||||
replaced = remoteCache.replaceWithVersion(key, session, versioned.getVersion(), lifespanMs, TimeUnit.MILLISECONDS, maxIdleMs, TimeUnit.MILLISECONDS);
|
||||
|
||||
if (!replaced) {
|
||||
logger.debugf("Failed to replace entity '%s' . Will retry again", key);
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debugf("Replaced entity in remote cache: %s", session.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private class RemoteCacheContext {
|
||||
|
||||
private final RemoteCache remoteCache;
|
||||
private final MaxIdleTimeLoader maxIdleTimeLoader;
|
||||
|
||||
public RemoteCacheContext(RemoteCache remoteCache, MaxIdleTimeLoader maxIdleLoader) {
|
||||
this.remoteCache = remoteCache;
|
||||
this.maxIdleTimeLoader = maxIdleLoader;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@FunctionalInterface
|
||||
public interface MaxIdleTimeLoader {
|
||||
|
||||
long getMaxIdleTimeMs(RealmModel realm);
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
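A hedged usage sketch for RemoteCacheInvoker follows; the "sessions" cache name and the idle-time formula passed as the MaxIdleTimeLoader are assumptions for illustration.

// Illustrative sketch only: registering a remote cache and propagating a session update through the invoker.
import org.infinispan.client.hotrod.RemoteCache;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;

class RemoteCacheInvokerSketch {

    static void propagate(KeycloakSession kcSession, RealmModel realm, RemoteCache remoteCache,
                          String sessionId, SessionUpdateTask<UserSessionEntity> task,
                          SessionEntityWrapper<UserSessionEntity> wrapper) {
        RemoteCacheInvoker invoker = new RemoteCacheInvoker();

        // MaxIdleTimeLoader: derive the remote-cache idle time from the realm (this formula is an assumption)
        invoker.addRemoteCache("sessions", remoteCache, r -> r.getSsoSessionIdleTimeout() * 1000L);

        // Applies the task to the remote (cross-DC) copy of the session unless its status is NOT_NEEDED
        invoker.runTask(kcSession, realm, "sessions", sessionId, task, wrapper);
    }
}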
|
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.remotestore;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryRemoved;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheFailover;
|
||||
import org.infinispan.client.hotrod.annotation.ClientListener;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryRemovedEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheFailoverEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientEvent;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@ClientListener
|
||||
public class RemoteCacheSessionListener {
|
||||
|
||||
protected static final Logger logger = Logger.getLogger(RemoteCacheSessionListener.class);
|
||||
|
||||
private Cache<String, SessionEntityWrapper> cache;
|
||||
private RemoteCache remoteCache;
|
||||
private boolean distributed;
|
||||
private String myAddress;
|
||||
|
||||
|
||||
protected RemoteCacheSessionListener() {
|
||||
}
|
||||
|
||||
|
||||
protected void init(KeycloakSession session, Cache<String, SessionEntityWrapper> cache, RemoteCache remoteCache) {
|
||||
this.cache = cache;
|
||||
this.remoteCache = remoteCache;
|
||||
|
||||
this.distributed = InfinispanUtil.isDistributedCache(cache);
|
||||
if (this.distributed) {
|
||||
this.myAddress = InfinispanUtil.getMyAddress(session);
|
||||
} else {
|
||||
this.myAddress = null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ClientCacheEntryCreated
|
||||
public void created(ClientCacheEntryCreatedEvent event) {
|
||||
String key = (String) event.getKey();
|
||||
|
||||
if (shouldUpdateLocalCache(event.getType(), key, event.isCommandRetried())) {
|
||||
// Should load it from remoteStore
|
||||
cache.get(key);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ClientCacheEntryModified
|
||||
public void updated(ClientCacheEntryModifiedEvent event) {
|
||||
String key = (String) event.getKey();
|
||||
|
||||
if (shouldUpdateLocalCache(event.getType(), key, event.isCommandRetried())) {
|
||||
|
||||
replaceRemoteEntityInCache(key);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void replaceRemoteEntityInCache(String key) {
|
||||
// TODO can be optimized and remoteSession sent in the event itself?
|
||||
SessionEntityWrapper localEntityWrapper = cache.get(key);
|
||||
SessionEntity remoteSession = (SessionEntity) remoteCache.get(key);
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debugf("Read session. Entity read from remote cache: %s", remoteSession.toString());
|
||||
}
|
||||
|
||||
SessionEntityWrapper sessionWrapper = remoteSession.mergeRemoteEntityWithLocalEntity(localEntityWrapper);
|
||||
|
||||
// We received event from remoteCache, so we won't update it back
|
||||
cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
|
||||
.replace(key, sessionWrapper);
|
||||
}
|
||||
|
||||
|
||||
@ClientCacheEntryRemoved
|
||||
public void removed(ClientCacheEntryRemovedEvent event) {
|
||||
String key = (String) event.getKey();
|
||||
|
||||
if (shouldUpdateLocalCache(event.getType(), key, event.isCommandRetried())) {
|
||||
// We received event from remoteCache, so we won't update it back
|
||||
cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
|
||||
.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ClientCacheFailover
|
||||
public void failover(ClientCacheFailoverEvent event) {
|
||||
logger.infof("Received failover event: " + event.toString());
|
||||
}
|
||||
|
||||
|
||||
// For distributed caches, ensure that the local modification is executed just on the owner OR if event.isCommandRetried is set
|
||||
protected boolean shouldUpdateLocalCache(ClientEvent.Type type, String key, boolean commandRetried) {
|
||||
boolean result;
|
||||
|
||||
// Case when the cache is stopping or already stopped
|
||||
if (!cache.getStatus().allowInvocations()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!distributed || commandRetried) {
|
||||
result = true;
|
||||
} else {
|
||||
String keyAddress = InfinispanUtil.getKeyPrimaryOwnerAddress(cache, key);
|
||||
result = myAddress.equals(keyAddress);
|
||||
}
|
||||
|
||||
logger.debugf("Received event from remote store. Event '%s', key '%s', skip '%b'", type.toString(), key, !result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ClientListener(includeCurrentState = true)
|
||||
public static class FetchInitialStateCacheListener extends RemoteCacheSessionListener {
|
||||
}
|
||||
|
||||
|
||||
@ClientListener(includeCurrentState = false)
|
||||
public static class DontFetchInitialStateCacheListener extends RemoteCacheSessionListener {
|
||||
}
|
||||
|
||||
|
||||
public static RemoteCacheSessionListener createListener(KeycloakSession session, Cache<String, SessionEntityWrapper> cache, RemoteCache remoteCache) {
|
||||
/*boolean isCoordinator = InfinispanUtil.isCoordinator(cache);
|
||||
|
||||
// Just the cluster coordinator will fetch userSessions from the remote cache.
|
||||
// In case the coordinator fails over during the state fetch, there is a slight risk that not all userSessions will be fetched to the local cluster. Assumed acceptable for now
|
||||
RemoteCacheSessionListener listener;
|
||||
if (isCoordinator) {
|
||||
logger.infof("Will fetch initial state from remote cache for cache '%s'", cache.getName());
|
||||
listener = new FetchInitialStateCacheListener();
|
||||
} else {
|
||||
logger.infof("Won't fetch initial state from remote cache for cache '%s'", cache.getName());
|
||||
listener = new DontFetchInitialStateCacheListener();
|
||||
}*/
|
||||
|
||||
RemoteCacheSessionListener listener = new RemoteCacheSessionListener();
|
||||
listener.init(session, cache, remoteCache);
|
||||
|
||||
return listener;
|
||||
}
|
||||
|
||||
|
||||
}
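For context, a hedged sketch of how such a listener is attached to the Hot Rod remote cache; the surrounding provider plumbing is assumed, not shown in this hunk.

// Illustrative sketch only: registering the listener so remote (cross-DC) changes flow back into the local cache.
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionListener;

class RemoteListenerWiringSketch {

    static void register(KeycloakSession session, Cache<String, SessionEntityWrapper> cache, RemoteCache remoteCache) {
        RemoteCacheSessionListener listener = RemoteCacheSessionListener.createListener(session, cache, remoteCache);
        remoteCache.addClientListener(listener);   // standard Hot Rod client-listener registration
    }
}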
|
|
@ -0,0 +1,120 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.remotestore;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Map;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.BaseCacheInitializer;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.OfflinePersistentUserSessionLoader;
|
||||
import org.keycloak.models.sessions.infinispan.initializer.SessionLoader;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class RemoteCacheSessionsLoader implements SessionLoader {
|
||||
|
||||
private static final Logger log = Logger.getLogger(RemoteCacheSessionsLoader.class);
|
||||
|
||||
// Hardcoded limit for now. See if it needs to be configurable (or if preloading can be enabled/disabled in the configuration)
|
||||
public static final int LIMIT = 100000;
|
||||
|
||||
private final String cacheName;
|
||||
|
||||
public RemoteCacheSessionsLoader(String cacheName) {
|
||||
this.cacheName = cacheName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void init(KeycloakSession session) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSessionsCount(KeycloakSession session) {
|
||||
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(getCache(session));
|
||||
return remoteCache.size();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean loadSessions(KeycloakSession session, int first, int max) {
|
||||
Cache cache = getCache(session);
|
||||
Cache decoratedCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE, Flag.IGNORE_RETURN_VALUES);
|
||||
|
||||
RemoteCache<?, ?> remoteCache = InfinispanUtil.getRemoteCache(cache);
|
||||
|
||||
int size = remoteCache.size();
|
||||
|
||||
if (size > LIMIT) {
|
||||
log.infof("Skip bulk load of '%d' sessions from remote cache '%s'. Sessions will be retrieved lazily", size, cache.getName());
|
||||
return true;
|
||||
} else {
|
||||
log.infof("Will do bulk load of '%d' sessions from remote cache '%s'", size, cache.getName());
|
||||
}
|
||||
|
||||
|
||||
for (Map.Entry<?, ?> entry : remoteCache.getBulk().entrySet()) {
|
||||
SessionEntity entity = (SessionEntity) entry.getValue();
|
||||
SessionEntityWrapper entityWrapper = new SessionEntityWrapper(entity);
|
||||
|
||||
decoratedCache.putAsync(entry.getKey(), entityWrapper);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
private Cache getCache(KeycloakSession session) {
|
||||
InfinispanConnectionProvider ispn = session.getProvider(InfinispanConnectionProvider.class);
|
||||
return ispn.getCache(cacheName);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean isFinished(BaseCacheInitializer initializer) {
|
||||
Cache<String, Serializable> workCache = initializer.getWorkCache();
|
||||
|
||||
// Check if persistent sessions were already loaded in this DC. This is possible just for offline sessions ATM
|
||||
Boolean sessionsLoaded = (Boolean) workCache
|
||||
.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE)
|
||||
.get(OfflinePersistentUserSessionLoader.PERSISTENT_SESSIONS_LOADED_IN_CURRENT_DC);
|
||||
|
||||
if (cacheName.equals(InfinispanConnectionProvider.OFFLINE_SESSION_CACHE_NAME) && sessionsLoaded != null && sessionsLoaded) {
|
||||
log.debugf("Sessions already loaded in current DC. Skip sessions loading from remote cache '%s'", cacheName);
|
||||
return true;
|
||||
} else {
|
||||
log.debugf("Sessions maybe not yet loaded in current DC. Will load them from remote cache '%s'", cacheName);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void afterAllSessionsLoaded(BaseCacheInitializer initializer) {
|
||||
|
||||
}
|
||||
}
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan.stream;
|
||||
|
||||
import org.keycloak.models.sessions.infinispan.UserSessionTimestamp;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
@ -25,7 +25,6 @@ import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
|||
|
||||
import java.io.Serializable;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Function;
|
||||
|
||||
/**
|
||||
|
@ -33,19 +32,19 @@ import java.util.function.Function;
|
|||
*/
|
||||
public class Mappers {
|
||||
|
||||
public static Function<Map.Entry<String, Optional<UserSessionTimestamp>>, UserSessionTimestamp> userSessionTimestamp() {
|
||||
return new UserSessionTimestampMapper();
|
||||
public static Function<Map.Entry<String, SessionEntityWrapper>, Map.Entry<String, SessionEntity>> unwrap() {
|
||||
return new SessionUnwrap();
|
||||
}
|
||||
|
||||
public static Function<Map.Entry<String, SessionEntity>, String> sessionId() {
|
||||
public static Function<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>, String> sessionId() {
|
||||
return new SessionIdMapper();
|
||||
}
|
||||
|
||||
public static Function<Map.Entry<String, SessionEntity>, SessionEntity> sessionEntity() {
|
||||
public static Function<Map.Entry<String, SessionEntityWrapper>, SessionEntity> sessionEntity() {
|
||||
return new SessionEntityMapper();
|
||||
}
|
||||
|
||||
public static Function<Map.Entry<String, SessionEntity>, UserSessionEntity> userSessionEntity() {
|
||||
public static Function<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>, UserSessionEntity> userSessionEntity() {
|
||||
return new UserSessionEntityMapper();
|
||||
}
|
||||
|
||||
|
@ -53,32 +52,55 @@ public class Mappers {
|
|||
return new LoginFailureIdMapper();
|
||||
}
|
||||
|
||||
private static class UserSessionTimestampMapper implements Function<Map.Entry<String, Optional<org.keycloak.models.sessions.infinispan.UserSessionTimestamp>>, org.keycloak.models.sessions.infinispan.UserSessionTimestamp>, Serializable {
|
||||
|
||||
private static class SessionUnwrap implements Function<Map.Entry<String, SessionEntityWrapper>, Map.Entry<String, SessionEntity>>, Serializable {
|
||||
|
||||
@Override
|
||||
public org.keycloak.models.sessions.infinispan.UserSessionTimestamp apply(Map.Entry<String, Optional<org.keycloak.models.sessions.infinispan.UserSessionTimestamp>> e) {
|
||||
return e.getValue().get();
|
||||
public Map.Entry<String, SessionEntity> apply(Map.Entry<String, SessionEntityWrapper> wrapperEntry) {
|
||||
return new Map.Entry<String, SessionEntity>() {
|
||||
|
||||
@Override
|
||||
public String getKey() {
|
||||
return wrapperEntry.getKey();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SessionEntity getValue() {
|
||||
return wrapperEntry.getValue().getEntity();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SessionEntity setValue(SessionEntity value) {
|
||||
throw new IllegalStateException("Unsupported operation");
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static class SessionIdMapper implements Function<Map.Entry<String, SessionEntity>, String>, Serializable {
|
||||
|
||||
private static class SessionIdMapper implements Function<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>, String>, Serializable {
|
||||
@Override
|
||||
public String apply(Map.Entry<String, SessionEntity> entry) {
|
||||
public String apply(Map.Entry<String, SessionEntityWrapper<UserSessionEntity>> entry) {
|
||||
return entry.getKey();
|
||||
}
|
||||
}
|
||||
|
||||
private static class SessionEntityMapper implements Function<Map.Entry<String, SessionEntity>, SessionEntity>, Serializable {
|
||||
private static class SessionEntityMapper implements Function<Map.Entry<String, SessionEntityWrapper>, SessionEntity>, Serializable {
|
||||
@Override
|
||||
public SessionEntity apply(Map.Entry<String, SessionEntity> entry) {
|
||||
return entry.getValue();
|
||||
public SessionEntity apply(Map.Entry<String, SessionEntityWrapper> entry) {
|
||||
return entry.getValue().getEntity();
|
||||
}
|
||||
}
|
||||
|
||||
private static class UserSessionEntityMapper implements Function<Map.Entry<String, SessionEntity>, UserSessionEntity>, Serializable {
|
||||
private static class UserSessionEntityMapper implements Function<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>, UserSessionEntity>, Serializable {
|
||||
|
||||
@Override
|
||||
public UserSessionEntity apply(Map.Entry<String, SessionEntity> entry) {
|
||||
return (UserSessionEntity) entry.getValue();
|
||||
public UserSessionEntity apply(Map.Entry<String, SessionEntityWrapper<UserSessionEntity>> entry) {
|
||||
return entry.getValue().getEntity();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static class LoginFailureIdMapper implements Function<Map.Entry<LoginFailureKey, LoginFailureEntity>, LoginFailureKey>, Serializable {
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan.stream;
|
||||
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
@ -26,7 +27,7 @@ import java.util.function.Predicate;
|
|||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
public class SessionPredicate implements Predicate<Map.Entry<String, SessionEntity>>, Serializable {
|
||||
public class SessionPredicate<S extends SessionEntity> implements Predicate<Map.Entry<String, SessionEntityWrapper<S>>>, Serializable {
|
||||
|
||||
private String realm;
|
||||
|
||||
|
@ -39,8 +40,8 @@ public class SessionPredicate implements Predicate<Map.Entry<String, SessionEnti
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean test(Map.Entry<String, SessionEntity> entry) {
|
||||
return realm.equals(entry.getValue().getRealm());
|
||||
public boolean test(Map.Entry<String, SessionEntityWrapper<S>> entry) {
|
||||
return realm.equals(entry.getValue().getEntity().getRealm());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
package org.keycloak.models.sessions.infinispan.stream;
|
||||
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
|
@ -27,7 +28,7 @@ import java.util.function.Predicate;
|
|||
/**
|
||||
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
|
||||
*/
|
||||
public class UserSessionPredicate implements Predicate<Map.Entry<String, SessionEntity>>, Serializable {
|
||||
public class UserSessionPredicate implements Predicate<Map.Entry<String, SessionEntityWrapper<UserSessionEntity>>>, Serializable {
|
||||
|
||||
private String realm;
|
||||
|
||||
|
@ -77,12 +78,8 @@ public class UserSessionPredicate implements Predicate<Map.Entry<String, Session
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean test(Map.Entry<String, SessionEntity> entry) {
|
||||
SessionEntity e = entry.getValue();
|
||||
|
||||
if (!(e instanceof UserSessionEntity)) {
|
||||
return false;
|
||||
}
|
||||
public boolean test(Map.Entry<String, SessionEntityWrapper<UserSessionEntity>> entry) {
|
||||
SessionEntity e = entry.getValue().getEntity();
|
||||
|
||||
UserSessionEntity entity = (UserSessionEntity) e;
|
||||
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.util;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
import org.infinispan.distribution.DistributionManager;
|
||||
import org.infinispan.persistence.manager.PersistenceManager;
|
||||
import org.infinispan.persistence.remote.RemoteStore;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.infinispan.remoting.transport.Transport;
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class InfinispanUtil {
|
||||
|
||||
// Check whether a RemoteStore (external JDG) is configured for the cross-Data-Center scenario
|
||||
public static Set<RemoteStore> getRemoteStores(Cache ispnCache) {
|
||||
return ispnCache.getAdvancedCache().getComponentRegistry().getComponent(PersistenceManager.class).getStores(RemoteStore.class);
|
||||
}
|
||||
|
||||
|
||||
public static RemoteCache getRemoteCache(Cache ispnCache) {
|
||||
Set<RemoteStore> remoteStores = getRemoteStores(ispnCache);
|
||||
if (remoteStores.isEmpty()) {
|
||||
return null;
|
||||
} else {
|
||||
return remoteStores.iterator().next().getRemoteCache();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public static boolean isDistributedCache(Cache ispnCache) {
|
||||
CacheMode cacheMode = ispnCache.getCacheConfiguration().clustering().cacheMode();
|
||||
return cacheMode.isDistributed();
|
||||
}
|
||||
|
||||
|
||||
public static String getMyAddress(KeycloakSession session) {
|
||||
return session.getProvider(InfinispanConnectionProvider.class).getNodeName();
|
||||
}
|
||||
|
||||
public static String getMySite(KeycloakSession session) {
|
||||
return session.getProvider(InfinispanConnectionProvider.class).getSiteName();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @param ispnCache
|
||||
* @param key
|
||||
* @return address of the node that is the primary owner of the specified key in the current cluster
|
||||
*/
|
||||
public static String getKeyPrimaryOwnerAddress(Cache ispnCache, Object key) {
|
||||
DistributionManager distManager = ispnCache.getAdvancedCache().getDistributionManager();
|
||||
if (distManager == null) {
|
||||
throw new IllegalArgumentException("Cache '" + ispnCache.getName() + "' is not distributed cache");
|
||||
}
|
||||
|
||||
return distManager.getPrimaryLocation(key).toString();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @param cache
|
||||
* @return true if this node is the cluster coordinator OR if it's a local cache
|
||||
*/
|
||||
public static boolean isCoordinator(Cache cache) {
|
||||
Transport transport = cache.getCacheManager().getTransport();
|
||||
return transport == null || transport.isCoordinator();
|
||||
}
|
||||
|
||||
}
|
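A short consumer sketch (not part of this diff) of the helper above: callers can detect whether the cross-DC remote store is active for a cache simply by checking for a RemoteCache. Only the class name of the sketch is invented:

import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;

class RemoteStoreCheckSketch {

    // Illustrative only: true when the cache is backed by a RemoteStore (external JDG),
    // i.e. when the cross-Data-Center setup is configured for this cache.
    static boolean hasRemoteStore(Cache<?, ?> sessionCache) {
        RemoteCache remoteCache = InfinispanUtil.getRemoteCache(sessionCache);
        return remoteCache != null;
    }
}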
|
@@ -0,0 +1,164 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.util;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.infinispan.commons.marshall.Externalizer;
|
||||
import org.infinispan.commons.marshall.MarshallUtil;
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
*
|
||||
* Helper to optimize marshalling/unmarshalling of some types
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class KeycloakMarshallUtil {
|
||||
|
||||
private static final Logger log = Logger.getLogger(KeycloakMarshallUtil.class);
|
||||
|
||||
public static final StringExternalizer STRING_EXT = new StringExternalizer();
|
||||
|
||||
// MAP
|
||||
|
||||
public static <K, V> void writeMap(Map<K, V> map, Externalizer<K> keyExternalizer, Externalizer<V> valueExternalizer, ObjectOutput output) throws IOException {
|
||||
if (map == null) {
|
||||
output.writeByte(0);
|
||||
} else {
|
||||
output.writeByte(1);
|
||||
|
||||
// Copy the map as it can be updated concurrently
|
||||
Map<K, V> copy = new HashMap<>(map);
|
||||
//Map<K, V> copy = map;
|
||||
|
||||
output.writeInt(copy.size());
|
||||
|
||||
for (Map.Entry<K, V> entry : copy.entrySet()) {
|
||||
keyExternalizer.writeObject(output, entry.getKey());
|
||||
valueExternalizer.writeObject(output, entry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static <K, V, TYPED_MAP extends Map<K, V>> TYPED_MAP readMap(ObjectInput input,
|
||||
Externalizer<K> keyExternalizer, Externalizer<V> valueExternalizer,
|
||||
MarshallUtil.MapBuilder<K, V, TYPED_MAP> mapBuilder) throws IOException, ClassNotFoundException {
|
||||
byte b = input.readByte();
|
||||
if (b == 0) {
|
||||
return null;
|
||||
} else {
|
||||
|
||||
int size = input.readInt();
|
||||
|
||||
TYPED_MAP map = mapBuilder.build(size);
|
||||
|
||||
for (int i=0 ; i<size ; i++) {
|
||||
K key = keyExternalizer.readObject(input);
|
||||
V value = valueExternalizer.readObject(input);
|
||||
|
||||
map.put(key, value);
|
||||
}
|
||||
|
||||
return map;
|
||||
}
|
||||
}
|
||||
|
||||
// COLLECTION
|
||||
|
||||
public static <E> void writeCollection(Collection<E> col, Externalizer<E> valueExternalizer, ObjectOutput output) throws IOException {
|
||||
if (col == null) {
|
||||
output.writeByte(0);
|
||||
} else {
|
||||
output.writeByte(1);
|
||||
|
||||
// Copy the collection as it can be updated concurrently
|
||||
Collection<E> copy = new LinkedList<>(col);
|
||||
|
||||
output.writeInt(copy.size());
|
||||
|
||||
for (E entry : copy) {
|
||||
valueExternalizer.writeObject(output, entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static <E, T extends Collection<E>> T readCollection(ObjectInput input, Externalizer<E> valueExternalizer,
|
||||
MarshallUtil.CollectionBuilder<E, T> colBuilder) throws ClassNotFoundException, IOException {
|
||||
byte b = input.readByte();
|
||||
if (b == 0) {
|
||||
return null;
|
||||
} else {
|
||||
|
||||
int size = input.readInt();
|
||||
|
||||
T col = colBuilder.build(size);
|
||||
|
||||
for (int i=0 ; i<size ; i++) {
|
||||
E value = valueExternalizer.readObject(input);
|
||||
col.add(value);
|
||||
}
|
||||
|
||||
return col;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
public static class ConcurrentHashMapBuilder<K, V> implements MarshallUtil.MapBuilder<K, V, ConcurrentHashMap<K, V>> {
|
||||
|
||||
@Override
|
||||
public ConcurrentHashMap<K, V> build(int size) {
|
||||
return new ConcurrentHashMap<>(size);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static class HashSetBuilder<E> implements MarshallUtil.CollectionBuilder<E, HashSet<E>> {
|
||||
|
||||
@Override
|
||||
public HashSet<E> build(int size) {
|
||||
return new HashSet<>(size);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private static class StringExternalizer implements Externalizer<String> {
|
||||
|
||||
@Override
|
||||
public void writeObject(ObjectOutput output, String str) throws IOException {
|
||||
MarshallUtil.marshallString(str, output);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String readObject(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
return MarshallUtil.unmarshallString(input);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
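To show how the helper above is meant to be consumed (not part of this diff), an Infinispan Externalizer can delegate its map handling to writeMap/readMap. The NotesHolder entity and its externalizer below are invented purely for illustration:

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.ConcurrentHashMap;

import org.infinispan.commons.marshall.Externalizer;
import org.keycloak.models.sessions.infinispan.util.KeycloakMarshallUtil;

// Hypothetical entity used only for this illustration.
class NotesHolder {
    final ConcurrentHashMap<String, String> notes;
    NotesHolder(ConcurrentHashMap<String, String> notes) { this.notes = notes; }
}

// Illustrative externalizer delegating the map handling to KeycloakMarshallUtil.
class NotesHolderExternalizer implements Externalizer<NotesHolder> {

    @Override
    public void writeObject(ObjectOutput output, NotesHolder obj) throws IOException {
        KeycloakMarshallUtil.writeMap(obj.notes, KeycloakMarshallUtil.STRING_EXT, KeycloakMarshallUtil.STRING_EXT, output);
    }

    @Override
    public NotesHolder readObject(ObjectInput input) throws IOException, ClassNotFoundException {
        ConcurrentHashMap<String, String> notes = KeycloakMarshallUtil.readMap(
                input, KeycloakMarshallUtil.STRING_EXT, KeycloakMarshallUtil.STRING_EXT,
                new KeycloakMarshallUtil.ConcurrentHashMapBuilder<>());
        return new NotesHolder(notes);
    }
}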
|
@@ -214,7 +214,7 @@ public class ConcurrencyJDGRemoteCacheTest {
|
|||
}
|
||||
}
|
||||
|
||||
private static class EntryInfo {
|
||||
public static class EntryInfo {
|
||||
AtomicInteger successfulInitializations = new AtomicInteger(0);
|
||||
AtomicInteger successfulListenerWrites = new AtomicInteger(0);
|
||||
AtomicInteger th1 = new AtomicInteger();
|
||||
|
|
|
@@ -0,0 +1,378 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.cluster.infinispan;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.client.hotrod.RemoteCache;
|
||||
import org.infinispan.client.hotrod.VersionedValue;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;
|
||||
import org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;
|
||||
import org.infinispan.client.hotrod.annotation.ClientListener;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
|
||||
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.configuration.cache.ConfigurationBuilder;
|
||||
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.manager.DefaultCacheManager;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
import org.infinispan.persistence.manager.PersistenceManager;
|
||||
import org.infinispan.persistence.remote.RemoteStore;
|
||||
import org.infinispan.persistence.remote.configuration.ExhaustedAction;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.keycloak.common.util.Time;
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.KcRemoteStore;
|
||||
import org.keycloak.models.sessions.infinispan.remotestore.KcRemoteStoreConfigurationBuilder;
|
||||
import org.keycloak.models.sessions.infinispan.util.InfinispanUtil;
|
||||
|
||||
/**
|
||||
* This test requires 2 JDG (or Infinispan) servers to be prepared before it is run.
|
||||
* Steps:
|
||||
* - In JDG1/standalone/configuration/clustered.xml add this: <replicated-cache name="sessions" mode="SYNC" start="EAGER"/>
|
||||
* - Same in JDG2
|
||||
* - Run JDG1 with: ./standalone.sh -c clustered.xml
|
||||
* - Run JDG2 with: ./standalone.sh -c clustered.xml -Djboss.socket.binding.port-offset=100
|
||||
* - Run this test
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class ConcurrencyJDGSessionsCacheTest {
|
||||
|
||||
protected static final Logger logger = Logger.getLogger(KcRemoteStore.class);
|
||||
|
||||
private static final int ITERATION_PER_WORKER = 1000;
|
||||
|
||||
private static final AtomicInteger failedReplaceCounter = new AtomicInteger(0);
|
||||
private static final AtomicInteger failedReplaceCounter2 = new AtomicInteger(0);
|
||||
|
||||
private static final AtomicInteger successfulListenerWrites = new AtomicInteger(0);
|
||||
private static final AtomicInteger successfulListenerWrites2 = new AtomicInteger(0);
|
||||
|
||||
//private static Map<String, EntryInfo> state = new HashMap<>();
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache1 = createManager(1).getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache2 = createManager(2).getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
|
||||
// Create initial item
|
||||
UserSessionEntity session = new UserSessionEntity();
|
||||
session.setId("123");
|
||||
session.setRealm("foo");
|
||||
session.setBrokerSessionId("!23123123");
|
||||
session.setBrokerUserId(null);
|
||||
session.setUser("foo");
|
||||
session.setLoginUsername("foo");
|
||||
session.setIpAddress("123.44.143.178");
|
||||
session.setStarted(Time.currentTime());
|
||||
session.setLastSessionRefresh(Time.currentTime());
|
||||
|
||||
AuthenticatedClientSessionEntity clientSession = new AuthenticatedClientSessionEntity();
|
||||
clientSession.setAuthMethod("saml");
|
||||
clientSession.setAction("something");
|
||||
clientSession.setTimestamp(1234);
|
||||
clientSession.setProtocolMappers(new HashSet<>(Arrays.asList("mapper1", "mapper2")));
|
||||
clientSession.setRoles(new HashSet<>(Arrays.asList("role1", "role2")));
|
||||
session.getAuthenticatedClientSessions().put("client1", clientSession);
|
||||
|
||||
SessionEntityWrapper<UserSessionEntity> wrappedSession = new SessionEntityWrapper<>(session);
|
||||
|
||||
// Some dummy testing of remoteStore behaviour
|
||||
logger.info("Before put");
|
||||
|
||||
cache1
|
||||
.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL) // will still invoke remoteStore; it just doesn't propagate to the cluster
|
||||
.put("123", wrappedSession);
|
||||
|
||||
logger.info("After put");
|
||||
|
||||
cache1.replace("123", wrappedSession);
|
||||
|
||||
logger.info("After replace");
|
||||
|
||||
cache1.get("123");
|
||||
|
||||
logger.info("After cache1.get");
|
||||
|
||||
cache2.get("123");
|
||||
|
||||
logger.info("After cache2.get");
|
||||
|
||||
cache1.get("123");
|
||||
|
||||
logger.info("After cache1.get - second call");
|
||||
|
||||
cache2.get("123");
|
||||
|
||||
logger.info("After cache2.get - second call");
|
||||
|
||||
cache2.replace("123", wrappedSession);
|
||||
|
||||
logger.info("After replace - second call");
|
||||
|
||||
cache1.get("123");
|
||||
|
||||
logger.info("After cache1.get - third call");
|
||||
|
||||
cache2.get("123");
|
||||
|
||||
logger.info("After cache2.get - third call");
|
||||
|
||||
cache1
|
||||
.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD)
|
||||
.entrySet().stream().forEach(e -> {
|
||||
});
|
||||
|
||||
logger.info("After cache1.stream");
|
||||
|
||||
// Explicitly call put on remoteCache (KcRemoteCache.write ignores remote writes)
|
||||
InfinispanUtil.getRemoteCache(cache1).put("123", session);
|
||||
|
||||
// Create caches, listeners and finally worker threads
|
||||
Thread worker1 = createWorker(cache1, 1);
|
||||
Thread worker2 = createWorker(cache2, 2);
|
||||
|
||||
long start = System.currentTimeMillis();
|
||||
|
||||
// Start and join workers
|
||||
worker1.start();
|
||||
worker2.start();
|
||||
|
||||
worker1.join();
|
||||
worker2.join();
|
||||
|
||||
long took = System.currentTimeMillis() - start;
|
||||
|
||||
// // Output
|
||||
// for (Map.Entry<String, EntryInfo> entry : state.entrySet()) {
|
||||
// System.out.println(entry.getKey() + ":::" + entry.getValue());
|
||||
// worker1.cache.remove(entry.getKey());
|
||||
// }
|
||||
|
||||
System.out.println("Finished. Took: " + took + " ms. Notes: " + cache1.get("123").getEntity().getNotes().size() +
|
||||
", successfulListenerWrites: " + successfulListenerWrites.get() + ", successfulListenerWrites2: " + successfulListenerWrites2.get() +
|
||||
", failedReplaceCounter: " + failedReplaceCounter.get() + ", failedReplaceCounter2: " + failedReplaceCounter2.get() );
|
||||
|
||||
// Finish JVM
|
||||
cache1.getCacheManager().stop();
|
||||
cache2.getCacheManager().stop();
|
||||
}
|
||||
|
||||
private static Thread createWorker(Cache<String, SessionEntityWrapper<UserSessionEntity>> cache, int threadId) {
|
||||
System.out.println("Retrieved cache: " + threadId);
|
||||
|
||||
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache);
|
||||
|
||||
remoteCache.keySet();
|
||||
|
||||
AtomicInteger counter = threadId ==1 ? successfulListenerWrites : successfulListenerWrites2;
|
||||
HotRodListener listener = new HotRodListener(cache, remoteCache, counter);
|
||||
remoteCache.addClientListener(listener);
|
||||
|
||||
return new RemoteCacheWorker(remoteCache, threadId);
|
||||
//return new CacheWorker(cache, threadId);
|
||||
}
|
||||
|
||||
private static EmbeddedCacheManager createManager(int threadId) {
|
||||
System.setProperty("java.net.preferIPv4Stack", "true");
|
||||
System.setProperty("jgroups.tcp.port", "53715");
|
||||
GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
|
||||
|
||||
boolean clustered = false;
|
||||
boolean async = false;
|
||||
boolean allowDuplicateJMXDomains = true;
|
||||
|
||||
if (clustered) {
|
||||
gcb = gcb.clusteredDefault();
|
||||
gcb.transport().clusterName("test-clustering");
|
||||
}
|
||||
|
||||
gcb.globalJmxStatistics().allowDuplicateDomains(allowDuplicateJMXDomains);
|
||||
|
||||
EmbeddedCacheManager cacheManager = new DefaultCacheManager(gcb.build());
|
||||
|
||||
Configuration invalidationCacheConfiguration = getCacheBackedByRemoteStore(threadId);
|
||||
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.SESSION_CACHE_NAME, invalidationCacheConfiguration);
|
||||
return cacheManager;
|
||||
|
||||
}
|
||||
|
||||
private static Configuration getCacheBackedByRemoteStore(int threadId) {
|
||||
ConfigurationBuilder cacheConfigBuilder = new ConfigurationBuilder();
|
||||
|
||||
//int port = threadId==1 ? 11222 : 11322;
|
||||
int port = 11222;
|
||||
|
||||
return cacheConfigBuilder.persistence().addStore(KcRemoteStoreConfigurationBuilder.class)
|
||||
.fetchPersistentState(false)
|
||||
.ignoreModifications(false)
|
||||
.purgeOnStartup(false)
|
||||
.preload(false)
|
||||
.shared(true)
|
||||
.remoteCacheName(InfinispanConnectionProvider.SESSION_CACHE_NAME)
|
||||
.rawValues(true)
|
||||
.forceReturnValues(false)
|
||||
.marshaller(KeycloakHotRodMarshallerFactory.class.getName())
|
||||
.addServer()
|
||||
.host("localhost")
|
||||
.port(port)
|
||||
.connectionPool()
|
||||
.maxActive(20)
|
||||
.exhaustedAction(ExhaustedAction.CREATE_NEW)
|
||||
.async()
|
||||
.enabled(false).build();
|
||||
}
|
||||
|
||||
@ClientListener
|
||||
public static class HotRodListener {
|
||||
|
||||
private Cache<String, SessionEntityWrapper<UserSessionEntity>> origCache;
|
||||
private RemoteCache remoteCache;
|
||||
private AtomicInteger listenerCount;
|
||||
|
||||
public HotRodListener(Cache<String, SessionEntityWrapper<UserSessionEntity>> origCache, RemoteCache remoteCache, AtomicInteger listenerCount) {
|
||||
this.listenerCount = listenerCount;
|
||||
this.remoteCache = remoteCache;
|
||||
this.origCache = origCache;
|
||||
}
|
||||
|
||||
@ClientCacheEntryCreated
|
||||
public void created(ClientCacheEntryCreatedEvent event) {
|
||||
String cacheKey = (String) event.getKey();
|
||||
listenerCount.incrementAndGet();
|
||||
}
|
||||
|
||||
@ClientCacheEntryModified
|
||||
public void updated(ClientCacheEntryModifiedEvent event) {
|
||||
String cacheKey = (String) event.getKey();
|
||||
listenerCount.incrementAndGet();
|
||||
|
||||
// TODO: can be optimized
|
||||
SessionEntity session = (SessionEntity) remoteCache.get(cacheKey);
|
||||
SessionEntityWrapper sessionWrapper = new SessionEntityWrapper(session);
|
||||
|
||||
// TODO: for distributed caches, ensure that it is executed just on owner OR if event.isCommandRetried
|
||||
origCache
|
||||
.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE)
|
||||
.replace(cacheKey, sessionWrapper);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
private static class RemoteCacheWorker extends Thread {
|
||||
|
||||
private final RemoteCache<String, UserSessionEntity> cache;
|
||||
|
||||
private final int myThreadId;
|
||||
|
||||
private RemoteCacheWorker(RemoteCache cache, int myThreadId) {
|
||||
this.cache = cache;
|
||||
this.myThreadId = myThreadId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
for (int i=0 ; i<ITERATION_PER_WORKER ; i++) {
|
||||
|
||||
String noteKey = "n-" + myThreadId + "-" + i;
|
||||
|
||||
boolean replaced = false;
|
||||
while (!replaced) {
|
||||
VersionedValue<UserSessionEntity> versioned = cache.getVersioned("123");
|
||||
UserSessionEntity oldSession = versioned.getValue();
|
||||
//UserSessionEntity clone = DistributedCacheConcurrentWritesTest.cloneSession(oldSession);
|
||||
UserSessionEntity clone = oldSession;
|
||||
|
||||
clone.getNotes().put(noteKey, "someVal");
|
||||
//cache.replace("123", clone);
|
||||
replaced = cacheReplace(versioned, clone);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private boolean cacheReplace(VersionedValue<UserSessionEntity> oldSession, UserSessionEntity newSession) {
|
||||
try {
|
||||
boolean replaced = cache.replaceWithVersion("123", newSession, oldSession.getVersion());
|
||||
//cache.replace("123", newSession);
|
||||
if (!replaced) {
|
||||
failedReplaceCounter.incrementAndGet();
|
||||
//return false;
|
||||
//System.out.println("Replace failed!!!");
|
||||
}
|
||||
return replaced;
|
||||
} catch (Exception re) {
|
||||
failedReplaceCounter2.incrementAndGet();
|
||||
return false;
|
||||
}
|
||||
//return replaced;
|
||||
}
|
||||
|
||||
}
|
||||
/*
|
||||
// Worker, which operates on "classic" cache and rely on operations delegated to the second cache
|
||||
private static class CacheWorker extends Thread {
|
||||
|
||||
private final Cache<String, SessionEntityWrapper<UserSessionEntity>> cache;
|
||||
|
||||
private final int myThreadId;
|
||||
|
||||
private CacheWorker(Cache<String, SessionEntityWrapper<UserSessionEntity>> cache, int myThreadId) {
|
||||
this.cache = cache;
|
||||
this.myThreadId = myThreadId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
for (int i=0 ; i<ITERATION_PER_WORKER ; i++) {
|
||||
|
||||
String noteKey = "n-" + myThreadId + "-" + i;
|
||||
|
||||
boolean replaced = false;
|
||||
while (!replaced) {
|
||||
VersionedValue<UserSessionEntity> versioned = cache.getVersioned("123");
|
||||
UserSessionEntity oldSession = versioned.getValue();
|
||||
//UserSessionEntity clone = DistributedCacheConcurrentWritesTest.cloneSession(oldSession);
|
||||
UserSessionEntity clone = oldSession;
|
||||
|
||||
clone.getNotes().put(noteKey, "someVal");
|
||||
//cache.replace("123", clone);
|
||||
replaced = cacheReplace(versioned, clone);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}*/
|
||||
|
||||
|
||||
}
|
|
@@ -0,0 +1,253 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.configuration.cache.ConfigurationBuilder;
|
||||
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
|
||||
import org.infinispan.manager.DefaultCacheManager;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
|
||||
import org.jgroups.JChannel;
|
||||
import org.junit.Ignore;
|
||||
import org.keycloak.common.util.Time;
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
|
||||
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
/**
|
||||
* Test concurrent writes to a distributed cache using atomic replace
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@Ignore
|
||||
public class DistributedCacheConcurrentWritesTest {
|
||||
|
||||
private static final int ITERATION_PER_WORKER = 1000;
|
||||
|
||||
private static final AtomicInteger failedReplaceCounter = new AtomicInteger(0);
|
||||
private static final AtomicInteger failedReplaceCounter2 = new AtomicInteger(0);
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
CacheWrapper<String, UserSessionEntity> cache1 = createCache("node1");
|
||||
CacheWrapper<String, UserSessionEntity> cache2 = createCache("node2");
|
||||
|
||||
// Create initial item
|
||||
UserSessionEntity session = new UserSessionEntity();
|
||||
session.setId("123");
|
||||
session.setRealm("foo");
|
||||
session.setBrokerSessionId("!23123123");
|
||||
session.setBrokerUserId(null);
|
||||
session.setUser("foo");
|
||||
session.setLoginUsername("foo");
|
||||
session.setIpAddress("123.44.143.178");
|
||||
session.setStarted(Time.currentTime());
|
||||
session.setLastSessionRefresh(Time.currentTime());
|
||||
|
||||
AuthenticatedClientSessionEntity clientSession = new AuthenticatedClientSessionEntity();
|
||||
clientSession.setAuthMethod("saml");
|
||||
clientSession.setAction("something");
|
||||
clientSession.setTimestamp(1234);
|
||||
clientSession.setProtocolMappers(new HashSet<>(Arrays.asList("mapper1", "mapper2")));
|
||||
clientSession.setRoles(new HashSet<>(Arrays.asList("role1", "role2")));
|
||||
session.getAuthenticatedClientSessions().put("client1", clientSession);
|
||||
|
||||
cache1.put("123", session);
|
||||
|
||||
// Create 2 workers for concurrent write and start them
|
||||
Worker worker1 = new Worker(1, cache1);
|
||||
Worker worker2 = new Worker(2, cache2);
|
||||
|
||||
long start = System.currentTimeMillis();
|
||||
|
||||
System.out.println("Started clustering test");
|
||||
|
||||
worker1.start();
|
||||
//worker1.join();
|
||||
worker2.start();
|
||||
|
||||
worker1.join();
|
||||
worker2.join();
|
||||
|
||||
long took = System.currentTimeMillis() - start;
|
||||
session = cache1.get("123").getEntity();
|
||||
System.out.println("Took: " + took + " ms. Notes count: " + session.getNotes().size() + ", failedReplaceCounter: " + failedReplaceCounter.get()
|
||||
+ ", failedReplaceCounter2: " + failedReplaceCounter2.get());
|
||||
|
||||
// JGroups statistics
|
||||
JChannel channel = (JChannel)((JGroupsTransport)cache1.wrappedCache.getAdvancedCache().getRpcManager().getTransport()).getChannel();
|
||||
System.out.println("Sent MB: " + channel.getSentBytes() / 1000000 + ", sent messages: " + channel.getSentMessages() + ", received MB: " + channel.getReceivedBytes() / 1000000 +
|
||||
", received messages: " + channel.getReceivedMessages());
|
||||
|
||||
// Kill JVM
|
||||
cache1.getCache().stop();
|
||||
cache2.getCache().stop();
|
||||
cache1.getCache().getCacheManager().stop();
|
||||
cache2.getCache().getCacheManager().stop();
|
||||
|
||||
System.out.println("Managers killed");
|
||||
}
|
||||
|
||||
|
||||
private static class Worker extends Thread {
|
||||
|
||||
private final CacheWrapper<String, UserSessionEntity> cache;
|
||||
private final int threadId;
|
||||
|
||||
public Worker(int threadId, CacheWrapper<String, UserSessionEntity> cache) {
|
||||
this.threadId = threadId;
|
||||
this.cache = cache;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
for (int i=0 ; i<ITERATION_PER_WORKER ; i++) {
|
||||
|
||||
String noteKey = "n-" + threadId + "-" + i;
|
||||
|
||||
boolean replaced = false;
|
||||
while (!replaced) {
|
||||
SessionEntityWrapper<UserSessionEntity> oldWrapped = cache.get("123");
|
||||
UserSessionEntity oldSession = oldWrapped.getEntity();
|
||||
//UserSessionEntity clone = DistributedCacheConcurrentWritesTest.cloneSession(oldSession);
|
||||
UserSessionEntity clone = oldSession;
|
||||
|
||||
clone.getNotes().put(noteKey, "someVal");
|
||||
//cache.replace("123", clone);
|
||||
replaced = cacheReplace(oldWrapped, clone);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private boolean cacheReplace(SessionEntityWrapper<UserSessionEntity> oldSession, UserSessionEntity newSession) {
|
||||
try {
|
||||
boolean replaced = cache.replace("123", oldSession, newSession);
|
||||
//cache.replace("123", newSession);
|
||||
if (!replaced) {
|
||||
failedReplaceCounter.incrementAndGet();
|
||||
//return false;
|
||||
//System.out.println("Replace failed!!!");
|
||||
}
|
||||
return replaced;
|
||||
} catch (Exception re) {
|
||||
failedReplaceCounter2.incrementAndGet();
|
||||
return false;
|
||||
}
|
||||
//return replaced;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Session clone
|
||||
|
||||
private static UserSessionEntity cloneSession(UserSessionEntity session) {
|
||||
UserSessionEntity clone = new UserSessionEntity();
|
||||
clone.setId(session.getId());
|
||||
clone.setRealm(session.getRealm());
|
||||
clone.setNotes(new ConcurrentHashMap<>(session.getNotes()));
|
||||
return clone;
|
||||
}
|
||||
|
||||
|
||||
// Cache creation utils
|
||||
|
||||
public static class CacheWrapper<K, V extends SessionEntity> {
|
||||
|
||||
private final Cache<K, SessionEntityWrapper<V>> wrappedCache;
|
||||
|
||||
public CacheWrapper(Cache<K, SessionEntityWrapper<V>> wrappedCache) {
|
||||
this.wrappedCache = wrappedCache;
|
||||
}
|
||||
|
||||
|
||||
public SessionEntityWrapper<V> get(K key) {
|
||||
SessionEntityWrapper<V> val = wrappedCache.get(key);
|
||||
return val;
|
||||
}
|
||||
|
||||
public void put(K key, V newVal) {
|
||||
SessionEntityWrapper<V> newWrapper = new SessionEntityWrapper<>(newVal);
|
||||
wrappedCache.put(key, newWrapper);
|
||||
}
|
||||
|
||||
|
||||
public boolean replace(K key, SessionEntityWrapper<V> oldVal, V newVal) {
|
||||
SessionEntityWrapper<V> newWrapper = new SessionEntityWrapper<>(newVal);
|
||||
return wrappedCache.replace(key, oldVal, newWrapper);
|
||||
}
|
||||
|
||||
private Cache<K, SessionEntityWrapper<V>> getCache() {
|
||||
return wrappedCache;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
public static CacheWrapper<String, UserSessionEntity> createCache(String nodeName) {
|
||||
EmbeddedCacheManager mgr = createManager(nodeName);
|
||||
Cache<String, SessionEntityWrapper<UserSessionEntity>> wrapped = mgr.getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
return new CacheWrapper<>(wrapped);
|
||||
}
|
||||
|
||||
|
||||
public static EmbeddedCacheManager createManager(String nodeName) {
|
||||
System.setProperty("java.net.preferIPv4Stack", "true");
|
||||
System.setProperty("jgroups.tcp.port", "53715");
|
||||
GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
|
||||
|
||||
boolean clustered = true;
|
||||
boolean async = false;
|
||||
boolean allowDuplicateJMXDomains = true;
|
||||
|
||||
if (clustered) {
|
||||
gcb = gcb.clusteredDefault();
|
||||
gcb.transport().clusterName("test-clustering");
|
||||
gcb.transport().nodeName(nodeName);
|
||||
}
|
||||
gcb.globalJmxStatistics().allowDuplicateDomains(allowDuplicateJMXDomains);
|
||||
|
||||
EmbeddedCacheManager cacheManager = new DefaultCacheManager(gcb.build());
|
||||
|
||||
|
||||
ConfigurationBuilder distConfigBuilder = new ConfigurationBuilder();
|
||||
if (clustered) {
|
||||
distConfigBuilder.clustering().cacheMode(async ? CacheMode.DIST_ASYNC : CacheMode.DIST_SYNC);
|
||||
distConfigBuilder.clustering().hash().numOwners(1);
|
||||
|
||||
// Disable L1 cache
|
||||
distConfigBuilder.clustering().hash().l1().enabled(false);
|
||||
}
|
||||
Configuration distConfig = distConfigBuilder.build();
|
||||
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.SESSION_CACHE_NAME, distConfig);
|
||||
return cacheManager;
|
||||
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,216 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models.sessions.infinispan.initializer;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.configuration.cache.ConfigurationBuilder;
|
||||
import org.infinispan.configuration.cache.VersioningScheme;
|
||||
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.manager.DefaultCacheManager;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
|
||||
import org.infinispan.transaction.LockingMode;
|
||||
import org.infinispan.transaction.lookup.DummyTransactionManagerLookup;
|
||||
import org.infinispan.util.concurrent.IsolationLevel;
|
||||
import org.jgroups.JChannel;
|
||||
import org.junit.Ignore;
|
||||
import org.keycloak.common.util.Time;
|
||||
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
|
||||
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
|
||||
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
|
||||
|
||||
/**
|
||||
* Test concurrent writes to a distributed cache using write-skew checks
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
@Ignore
|
||||
public class DistributedCacheWriteSkewTest {
|
||||
|
||||
private static final int ITERATION_PER_WORKER = 1000;
|
||||
|
||||
private static final AtomicInteger failedReplaceCounter = new AtomicInteger(0);
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
Cache<String, UserSessionEntity> cache1 = createManager("node1").getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
Cache<String, UserSessionEntity> cache2 = createManager("node2").getCache(InfinispanConnectionProvider.SESSION_CACHE_NAME);
|
||||
|
||||
// Create initial item
|
||||
UserSessionEntity session = new UserSessionEntity();
|
||||
session.setId("123");
|
||||
session.setRealm("foo");
|
||||
session.setBrokerSessionId("!23123123");
|
||||
session.setBrokerUserId(null);
|
||||
session.setUser("foo");
|
||||
session.setLoginUsername("foo");
|
||||
session.setIpAddress("123.44.143.178");
|
||||
session.setStarted(Time.currentTime());
|
||||
session.setLastSessionRefresh(Time.currentTime());
|
||||
|
||||
AuthenticatedClientSessionEntity clientSession = new AuthenticatedClientSessionEntity();
|
||||
clientSession.setAuthMethod("saml");
|
||||
clientSession.setAction("something");
|
||||
clientSession.setTimestamp(1234);
|
||||
clientSession.setProtocolMappers(new HashSet<>(Arrays.asList("mapper1", "mapper2")));
|
||||
clientSession.setRoles(new HashSet<>(Arrays.asList("role1", "role2")));
|
||||
session.getAuthenticatedClientSessions().put("client1", clientSession);
|
||||
|
||||
cache1.put("123", session);
|
||||
|
||||
//cache1.replace("123", session);
|
||||
|
||||
// Create 2 workers for concurrent write and start them
|
||||
Worker worker1 = new Worker(1, cache1);
|
||||
Worker worker2 = new Worker(2, cache2);
|
||||
|
||||
long start = System.currentTimeMillis();
|
||||
|
||||
System.out.println("Started clustering test");
|
||||
|
||||
worker1.start();
|
||||
//worker1.join();
|
||||
worker2.start();
|
||||
|
||||
worker1.join();
|
||||
worker2.join();
|
||||
|
||||
long took = System.currentTimeMillis() - start;
|
||||
session = cache1.get("123");
|
||||
System.out.println("Took: " + took + " ms. Notes count: " + session.getNotes().size() + ", failedReplaceCounter: " + failedReplaceCounter.get());
|
||||
|
||||
// JGroups statistics
|
||||
JChannel channel = (JChannel)((JGroupsTransport)cache1.getAdvancedCache().getRpcManager().getTransport()).getChannel();
|
||||
System.out.println("Sent MB: " + channel.getSentBytes() / 1000000 + ", sent messages: " + channel.getSentMessages() + ", received MB: " + channel.getReceivedBytes() / 1000000 +
|
||||
", received messages: " + channel.getReceivedMessages());
|
||||
|
||||
// Kill JVM
|
||||
cache1.stop();
|
||||
cache2.stop();
|
||||
cache1.getCacheManager().stop();
|
||||
cache2.getCacheManager().stop();
|
||||
|
||||
System.out.println("Managers killed");
|
||||
}
|
||||
|
||||
|
||||
private static class Worker extends Thread {
|
||||
|
||||
private final Cache<String, UserSessionEntity> cache;
|
||||
private final int threadId;
|
||||
|
||||
public Worker(int threadId, Cache<String, UserSessionEntity> cache) {
|
||||
this.threadId = threadId;
|
||||
this.cache = cache;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
for (int i=0 ; i<ITERATION_PER_WORKER ; i++) {
|
||||
|
||||
String noteKey = "n-" + threadId + "-" + i;
|
||||
|
||||
boolean replaced = false;
|
||||
while (!replaced) {
|
||||
try {
|
||||
//cache.startBatch();
|
||||
|
||||
UserSessionEntity oldSession = cache.get("123");
|
||||
|
||||
//UserSessionEntity clone = DistributedCacheConcurrentWritesTest.cloneSession(oldSession);
|
||||
UserSessionEntity clone = oldSession;
|
||||
|
||||
clone.getNotes().put(noteKey, "someVal");
|
||||
|
||||
cache.replace("123", clone);
|
||||
//cache.getAdvancedCache().withFlags(Flag.FAIL_SILENTLY).endBatch(true);
|
||||
replaced = true;
|
||||
} catch (Exception e) {
|
||||
System.out.println(e);
|
||||
failedReplaceCounter.incrementAndGet();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
public static EmbeddedCacheManager createManager(String nodeName) {
|
||||
System.setProperty("java.net.preferIPv4Stack", "true");
|
||||
System.setProperty("jgroups.tcp.port", "53715");
|
||||
GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
|
||||
|
||||
boolean clustered = true;
|
||||
boolean async = false;
|
||||
boolean allowDuplicateJMXDomains = true;
|
||||
|
||||
if (clustered) {
|
||||
gcb = gcb.clusteredDefault();
|
||||
gcb.transport().clusterName("test-clustering");
|
||||
gcb.transport().nodeName(nodeName);
|
||||
}
|
||||
gcb.globalJmxStatistics().allowDuplicateDomains(allowDuplicateJMXDomains);
|
||||
|
||||
EmbeddedCacheManager cacheManager = new DefaultCacheManager(gcb.build());
|
||||
|
||||
|
||||
ConfigurationBuilder distConfigBuilder = new ConfigurationBuilder();
|
||||
if (clustered) {
|
||||
distConfigBuilder.clustering().cacheMode(async ? CacheMode.DIST_ASYNC : CacheMode.DIST_SYNC);
|
||||
distConfigBuilder.clustering().hash().numOwners(1);
|
||||
|
||||
// Disable L1 cache
|
||||
distConfigBuilder.clustering().hash().l1().enabled(false);
|
||||
|
||||
//distConfigBuilder.storeAsBinary().enable().storeKeysAsBinary(false).storeValuesAsBinary(true);
|
||||
|
||||
distConfigBuilder.versioning().enabled(true);
|
||||
distConfigBuilder.versioning().scheme(VersioningScheme.SIMPLE);
|
||||
|
||||
distConfigBuilder.locking().writeSkewCheck(true);
|
||||
distConfigBuilder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
|
||||
distConfigBuilder.locking().concurrencyLevel(32);
|
||||
distConfigBuilder.locking().lockAcquisitionTimeout(1000, TimeUnit.SECONDS);
|
||||
|
||||
distConfigBuilder.versioning().enabled(true);
|
||||
distConfigBuilder.versioning().scheme(VersioningScheme.SIMPLE);
|
||||
|
||||
|
||||
// distConfigBuilder.invocationBatching().enable();
|
||||
//distConfigBuilder.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
|
||||
distConfigBuilder.transaction().transactionManagerLookup(new DummyTransactionManagerLookup());
|
||||
distConfigBuilder.transaction().lockingMode(LockingMode.OPTIMISTIC);
|
||||
}
|
||||
Configuration distConfig = distConfigBuilder.build();
|
||||
|
||||
cacheManager.defineConfiguration(InfinispanConnectionProvider.SESSION_CACHE_NAME, distConfig);
|
||||
return cacheManager;
|
||||
|
||||
}
|
||||
}
|
|
@@ -21,6 +21,7 @@ package org.keycloak.cluster;
|
|||
import org.keycloak.provider.Provider;
|
||||
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
/**
|
||||
* Various utils related to clustering and concurrent tasks on cluster nodes
|
||||
|
@@ -47,9 +48,21 @@ public interface ClusterProvider extends Provider {
|
|||
<T> ExecutionResult<T> executeIfNotExecuted(String taskKey, int taskTimeoutInSeconds, Callable<T> task);
|
||||
|
||||
|
||||
/**
|
||||
* Execute the given task only if it's not already in progress (either on this or any other cluster node). It returns a corresponding future to every caller, and this future is completed when:
|
||||
* - The task finishes successfully. In that case the Future resolves to true
|
||||
* - The task doesn't finish successfully, for example because of a cluster node failover. In that case the Future resolves to false
|
||||
*
|
||||
* @param taskKey
|
||||
* @param taskTimeoutInSeconds timeout for the given task. If an existing task has been in progress for longer than this, it is considered outdated and our task will be started.
|
||||
* @param task
|
||||
* @return Future, which is completed once the running task is finished. Resolves to true if the task finished successfully. Otherwise (for example if the cluster node running the task left the cluster) it resolves to false
|
||||
*/
|
||||
Future<Boolean> executeIfNotExecutedAsync(String taskKey, int taskTimeoutInSeconds, Callable task);
|
||||
|
||||
|
||||
/**
|
||||
* Register a task (listener) under the given key. When this key is put into the cache on any cluster node, the task will be executed.
|
||||
* When using {@link #ALL} as the taskKey, the listener will always be triggered for any value put into the cache.
|
||||
*
|
||||
* @param taskKey
|
||||
* @param task
|
||||
|
@@ -58,18 +71,24 @@ public interface ClusterProvider extends Provider {
|
|||
|
||||
|
||||
/**
|
||||
* Notify registered listeners on all cluster nodes. It will notify listeners registered under given taskKey AND also listeners registered with {@link #ALL} key (those are always executed)
|
||||
* Notify registered listeners on all cluster nodes in all datacenters. It will notify the listeners registered under the given taskKey
|
||||
*
|
||||
* @param taskKey
|
||||
* @param event
|
||||
* @param ignoreSender if true, then sender node itself won't receive the notification
|
||||
* @param dcNotify Specify which DCs to notify. See {@link DCNotify} enum values for more info
|
||||
*/
|
||||
void notify(String taskKey, ClusterEvent event, boolean ignoreSender);
|
||||
void notify(String taskKey, ClusterEvent event, boolean ignoreSender, DCNotify dcNotify);
|
||||
|
||||
enum DCNotify {
|
||||
/** Send message to all cluster nodes in all DCs **/
|
||||
ALL_DCS,
|
||||
|
||||
/** Send message to all cluster nodes on THIS datacenter only **/
|
||||
LOCAL_DC_ONLY,
|
||||
|
||||
/** Send message to all cluster nodes in all datacenters, but NOT to this datacenter. Option "ignoreSender" of method {@link #notify} will be ignored, as the sender is skipped anyway because it is in this datacenter **/
|
||||
ALL_BUT_LOCAL_DC
|
||||
}
|
||||
|
||||
/**
|
||||
* Special value to be used with {@link #registerListener} to specify that the particular listener will always be triggered for all notifications
|
||||
* with any key.
|
||||
*/
|
||||
String ALL = "ALL";
|
||||
}
|
||||
|
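As a hedged usage sketch of the extended contract above (not part of this diff), a provider might combine the new dcNotify parameter with the asynchronous singleton execution. The task key, timeout and event instance are made-up examples:

import java.util.concurrent.Callable;
import java.util.concurrent.Future;

import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.models.KeycloakSession;

class ClusterNotifySketch {

    // Illustrative only: broadcast an event to the other datacenters and run a task at most once cluster-wide.
    static Future<Boolean> example(KeycloakSession session, ClusterEvent event) {
        ClusterProvider cluster = session.getProvider(ClusterProvider.class);

        // Listeners registered under "my-task-key" (and under ClusterProvider.ALL) in the remote DCs
        // receive the event; the local DC is skipped, so ignoreSender is effectively irrelevant here.
        cluster.notify("my-task-key", event, true, ClusterProvider.DCNotify.ALL_BUT_LOCAL_DC);

        // The returned future resolves to false if, for example, the node running the task leaves the cluster.
        Callable<Void> task = () -> {
            // ... the actual work would go here ...
            return null;
        };
        return cluster.executeIfNotExecutedAsync("my-task-key", 60, task);
    }
}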
|
|
@@ -115,11 +115,6 @@ public class PersistentUserSessionAdapter implements UserSessionModel {
|
|||
return user;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setUser(UserModel user) {
|
||||
throw new IllegalStateException("Not supported");
|
||||
}
|
||||
|
||||
@Override
|
||||
public RealmModel getRealm() {
|
||||
return realm;
|
||||
|
|
|
@@ -17,6 +17,7 @@
|
|||
|
||||
package org.keycloak.models.utils;
|
||||
|
||||
import org.keycloak.models.KeycloakSession;
|
||||
import org.keycloak.provider.ProviderEvent;
|
||||
|
||||
/**
|
||||
|
@@ -25,4 +26,14 @@ import org.keycloak.provider.ProviderEvent;
|
|||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public class PostMigrationEvent implements ProviderEvent {
|
||||
|
||||
private final KeycloakSession session;
|
||||
|
||||
public PostMigrationEvent(KeycloakSession session) {
|
||||
this.session = session;
|
||||
}
|
||||
|
||||
public KeycloakSession getSession() {
|
||||
return session;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,91 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat, Inc. and/or its affiliates
|
||||
* and other contributors as indicated by the @author tags.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.keycloak.models;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
* Handles the common transaction logic related to state handling (begin, rollback-only, etc.)
|
||||
*
|
||||
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
|
||||
*/
|
||||
public abstract class AbstractKeycloakTransaction implements KeycloakTransaction {
|
||||
|
||||
public static final Logger logger = Logger.getLogger(AbstractKeycloakTransaction.class);
|
||||
|
||||
protected TransactionState state = TransactionState.NOT_STARTED;
|
||||
|
||||
@Override
|
||||
public void begin() {
|
||||
if (state != TransactionState.NOT_STARTED) {
|
||||
throw new IllegalStateException("Transaction already started");
|
||||
}
|
||||
|
||||
state = TransactionState.STARTED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void commit() {
|
||||
if (state != TransactionState.STARTED) {
|
||||
throw new IllegalStateException("Transaction in illegal state for commit: " + state);
|
||||
}
|
||||
|
||||
commitImpl();
|
||||
|
||||
state = TransactionState.FINISHED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rollback() {
|
||||
if (state != TransactionState.STARTED && state != TransactionState.ROLLBACK_ONLY) {
|
||||
throw new IllegalStateException("Transaction in illegal state for rollback: " + state);
|
||||
}
|
||||
|
||||
rollbackImpl();
|
||||
|
||||
state = TransactionState.FINISHED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRollbackOnly() {
|
||||
state = TransactionState.ROLLBACK_ONLY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean getRollbackOnly() {
|
||||
return state == TransactionState.ROLLBACK_ONLY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isActive() {
|
||||
return state == TransactionState.STARTED || state == TransactionState.ROLLBACK_ONLY;
|
||||
}
|
||||
|
||||
public TransactionState getState() {
|
||||
return state;
|
||||
}
|
||||
|
||||
public enum TransactionState {
|
||||
NOT_STARTED, STARTED, ROLLBACK_ONLY, FINISHED
|
||||
}
|
||||
|
||||
|
||||
protected abstract void commitImpl();
|
||||
|
||||
protected abstract void rollbackImpl();
|
||||
}
|
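A minimal subclass sketch (not part of this diff) of the template above: a concrete transaction only supplies commitImpl/rollbackImpl, while begin/commit/rollback and the state checks come from the base class. The class below is invented for illustration:

import org.keycloak.models.AbstractKeycloakTransaction;

// Illustrative only: buffers a single pending change and applies it on commit.
class BufferedChangeTransaction extends AbstractKeycloakTransaction {

    private final Runnable pendingChange;

    BufferedChangeTransaction(Runnable pendingChange) {
        this.pendingChange = pendingChange;
    }

    @Override
    protected void commitImpl() {
        // Called by commit() only after the state check passed; the state moves to FINISHED afterwards.
        pendingChange.run();
    }

    @Override
    protected void rollbackImpl() {
        // Nothing was applied yet, so rollback simply drops the buffered change.
    }
}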
|
@@ -62,8 +62,6 @@ public interface UserSessionModel {
|
|||
State getState();
|
||||
void setState(State state);
|
||||
|
||||
void setUser(UserModel user);
|
||||
|
||||
// Will completely restart whole state of user session. It will just keep same ID.
|
||||
void restartSession(RealmModel realm, UserModel user, String loginUsername, String ipAddress, String authMethod, boolean rememberMe, String brokerSessionId, String brokerUserId);
|
||||
|
||||
|
|
|
@@ -20,6 +20,7 @@ package org.keycloak.models;
|
|||
import org.keycloak.provider.Provider;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
|
||||
|
@@ -37,6 +38,12 @@ public interface UserSessionProvider extends Provider {
|
|||
List<UserSessionModel> getUserSessionByBrokerUserId(RealmModel realm, String brokerUserId);
|
||||
UserSessionModel getUserSessionByBrokerSessionId(RealmModel realm, String brokerSessionId);
|
||||
|
||||
/**
|
||||
* Return the userSession with the specified ID as long as the predicate passes. Otherwise returns null.
|
||||
* If the predicate doesn't pass, the implementation can take some best-effort actions to try to make it pass (eg. download the userSession from the other DC)
|
||||
*/
|
||||
UserSessionModel getUserSessionWithPredicate(RealmModel realm, String id, boolean offline, Predicate<UserSessionModel> predicate);
|
||||
|
||||
long getActiveUserSessions(RealmModel realm, ClientModel client);
|
||||
|
||||
/** This will remove attached ClientLoginSessionModels too **/
|
||||
|
|
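A small illustration (not part of this diff) of the new lookup: a caller that wants the online session only when it already carries a client session for a particular client could pass a predicate like the one below. The helper class and parameter names are assumptions:

import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserSessionModel;

class SessionWithClientLookupSketch {

    // Illustrative only: return the online user session just if it already carries
    // an authenticated client session for the given client, otherwise null.
    static UserSessionModel lookup(KeycloakSession session, RealmModel realm, String userSessionId, String clientUUID) {
        return session.sessions().getUserSessionWithPredicate(realm, userSessionId, false,
                userSession -> userSession.getAuthenticatedClientSessions().containsKey(clientUUID));
    }
}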
|
@@ -53,7 +53,7 @@ public interface CommonClientSessionModel {
|
|||
// TODO: Not needed here...?
|
||||
public Set<String> getProtocolMappers();
|
||||
public void setProtocolMappers(Set<String> protocolMappers);
|
||||
|
||||
|
||||
public static enum Action {
|
||||
OAUTH_GRANT,
|
||||
CODE_TO_TOKEN,
|
||||
|
|
|
@@ -59,6 +59,7 @@ import org.keycloak.services.ErrorResponseException;
|
|||
import org.keycloak.services.managers.AuthenticationManager;
|
||||
import org.keycloak.services.managers.AuthenticationSessionManager;
|
||||
import org.keycloak.services.managers.ClientSessionCode;
|
||||
import org.keycloak.services.managers.UserSessionCrossDCManager;
|
||||
import org.keycloak.services.managers.UserSessionManager;
|
||||
import org.keycloak.sessions.AuthenticationSessionModel;
|
||||
import org.keycloak.util.TokenUtil;
|
||||
|
@@ -121,7 +122,9 @@ public class TokenManager {
|
|||
|
||||
public TokenValidation validateToken(KeycloakSession session, UriInfo uriInfo, ClientConnection connection, RealmModel realm, AccessToken oldToken, HttpHeaders headers) throws OAuthErrorException {
|
||||
UserSessionModel userSession = null;
|
||||
if (TokenUtil.TOKEN_TYPE_OFFLINE.equals(oldToken.getType())) {
|
||||
boolean offline = TokenUtil.TOKEN_TYPE_OFFLINE.equals(oldToken.getType());
|
||||
|
||||
if (offline) {
|
||||
|
||||
UserSessionManager sessionManager = new UserSessionManager(session);
|
||||
userSession = sessionManager.findOfflineUserSession(realm, oldToken.getSessionState());
|
||||
|
@@ -133,6 +136,8 @@ public class TokenManager {
|
|||
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Offline session not active", "Offline session not active");
|
||||
}
|
||||
|
||||
} else {
|
||||
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Offline user session not found", "Offline user session not found");
|
||||
}
|
||||
} else {
|
||||
// Find userSession regularly for online tokens
|
||||
|
@@ -143,10 +148,6 @@ public class TokenManager {
|
|||
}
|
||||
}
|
||||
|
||||
if (userSession == null) {
|
||||
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Offline user session not found", "Offline user session not found");
|
||||
}
|
||||
|
||||
UserModel user = userSession.getUser();
|
||||
if (user == null) {
|
||||
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Invalid refresh token", "Unknown user");
|
||||
|
@@ -159,6 +160,16 @@ public class TokenManager {
|
|||
ClientModel client = session.getContext().getClient();
|
||||
AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(client.getId());
|
||||
|
||||
// Can theoretically happen in a cross-dc environment. Check whether a userSession with our client is available in the remoteCache
|
||||
if (clientSession == null) {
|
||||
userSession = new UserSessionCrossDCManager(session).getUserSessionWithClient(realm, userSession.getId(), offline, client.getId());
|
||||
if (userSession != null) {
|
||||
clientSession = userSession.getAuthenticatedClientSessions().get(client.getId());
|
||||
} else {
|
||||
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Session doesn't have required client", "Session doesn't have required client");
|
||||
}
|
||||
}
|
||||
|
||||
if (!client.getClientId().equals(oldToken.getIssuedFor())) {
|
||||
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Unmatching clients", "Unmatching clients");
|
||||
}
|
||||
|
@@ -202,21 +213,15 @@ public class TokenManager {
|
|||
return false;
|
||||
}
|
||||
|
||||
UserSessionModel userSession = session.sessions().getUserSession(realm, token.getSessionState());
|
||||
UserSessionModel userSession = new UserSessionCrossDCManager(session).getUserSessionWithClient(realm, token.getSessionState(), false, client.getId());
|
||||
if (AuthenticationManager.isSessionValid(realm, userSession)) {
|
||||
AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(client.getId());
|
||||
if (clientSession != null) {
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
userSession = session.sessions().getOfflineUserSession(realm, token.getSessionState());
|
||||
userSession = new UserSessionCrossDCManager(session).getUserSessionWithClient(realm, token.getSessionState(), true, client.getId());
|
||||
if (AuthenticationManager.isOfflineSessionValid(realm, userSession)) {
|
||||
AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(client.getId());
|
||||
if (clientSession != null) {
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
|
|
|
@@ -65,6 +65,7 @@ import javax.ws.rs.core.MultivaluedMap;
|
|||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.security.MessageDigest;
|
||||
|
@@ -399,9 +400,16 @@ public class TokenEndpoint {
|
|||
logger.debugf("Adapter Session '%s' saved in ClientSession for client '%s'. Host is '%s'", adapterSessionId, client.getClientId(), adapterSessionHost);
|
||||
|
||||
event.detail(AdapterConstants.CLIENT_SESSION_STATE, adapterSessionId);
|
||||
clientSession.setNote(AdapterConstants.CLIENT_SESSION_STATE, adapterSessionId);
|
||||
String oldClientSessionState = clientSession.getNote(AdapterConstants.CLIENT_SESSION_STATE);
|
||||
if (!adapterSessionId.equals(oldClientSessionState)) {
|
||||
clientSession.setNote(AdapterConstants.CLIENT_SESSION_STATE, adapterSessionId);
|
||||
}
|
||||
|
||||
event.detail(AdapterConstants.CLIENT_SESSION_HOST, adapterSessionHost);
|
||||
clientSession.setNote(AdapterConstants.CLIENT_SESSION_HOST, adapterSessionHost);
|
||||
String oldClientSessionHost = clientSession.getNote(AdapterConstants.CLIENT_SESSION_HOST);
|
||||
if (!Objects.equals(adapterSessionHost, oldClientSessionHost)) {
|
||||
clientSession.setNote(AdapterConstants.CLIENT_SESSION_HOST, adapterSessionHost);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -42,6 +42,7 @@ import org.keycloak.services.ErrorResponseException;
|
|||
import org.keycloak.services.Urls;
|
||||
import org.keycloak.services.managers.AppAuthManager;
|
||||
import org.keycloak.services.managers.AuthenticationManager;
|
||||
import org.keycloak.services.managers.UserSessionCrossDCManager;
|
||||
import org.keycloak.services.resources.Cors;
|
||||
import org.keycloak.utils.MediaType;
|
||||
|
||||
|
@@ -139,18 +140,6 @@ public class UserInfoEndpoint {
             throw new ErrorResponseException(OAuthErrorException.INVALID_TOKEN, "Token invalid: " + e.getMessage(), Response.Status.UNAUTHORIZED);
         }
 
-        UserSessionModel userSession = findValidSession(token, event);
-
-        UserModel userModel = userSession.getUser();
-        if (userModel == null) {
-            event.error(Errors.USER_NOT_FOUND);
-            throw new ErrorResponseException(OAuthErrorException.INVALID_REQUEST, "User not found", Response.Status.BAD_REQUEST);
-        }
-
-        event.user(userModel)
-                .detail(Details.USERNAME, userModel.getUsername());
-
-
         ClientModel clientModel = realm.getClientByClientId(token.getIssuedFor());
         if (clientModel == null) {
             event.error(Errors.CLIENT_NOT_FOUND);
@@ -164,12 +153,21 @@ public class UserInfoEndpoint {
             throw new ErrorResponseException(OAuthErrorException.INVALID_REQUEST, "Client disabled", Response.Status.BAD_REQUEST);
         }
 
-        AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(clientModel.getId());
-        if (clientSession == null) {
-            event.error(Errors.SESSION_EXPIRED);
-            throw new ErrorResponseException(OAuthErrorException.INVALID_TOKEN, "Session expired", Response.Status.UNAUTHORIZED);
+        UserSessionModel userSession = findValidSession(token, event, clientModel);
+
+        UserModel userModel = userSession.getUser();
+        if (userModel == null) {
+            event.error(Errors.USER_NOT_FOUND);
+            throw new ErrorResponseException(OAuthErrorException.INVALID_REQUEST, "User not found", Response.Status.BAD_REQUEST);
         }
 
+        event.user(userModel)
+                .detail(Details.USERNAME, userModel.getUsername());
+
+
+        // Existence of authenticatedClientSession for our client already handled before
+        AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(clientModel.getId());
 
         AccessToken userInfo = new AccessToken();
         tokenManager.transformUserInfoAccessToken(session, userInfo, realm, clientModel, userModel, userSession, clientSession);
@@ -209,14 +207,14 @@ public class UserInfoEndpoint {
         }
 
 
-    private UserSessionModel findValidSession(AccessToken token, EventBuilder event) {
-        UserSessionModel userSession = session.sessions().getUserSession(realm, token.getSessionState());
+    private UserSessionModel findValidSession(AccessToken token, EventBuilder event, ClientModel client) {
+        UserSessionModel userSession = new UserSessionCrossDCManager(session).getUserSessionWithClient(realm, token.getSessionState(), false, client.getId());
         UserSessionModel offlineUserSession = null;
         if (AuthenticationManager.isSessionValid(realm, userSession)) {
             event.session(userSession);
             return userSession;
         } else {
-            offlineUserSession = session.sessions().getOfflineUserSession(realm, token.getSessionState());
+            offlineUserSession = new UserSessionCrossDCManager(session).getUserSessionWithClient(realm, token.getSessionState(), true, client.getId());
             if (AuthenticationManager.isOfflineSessionValid(realm, offlineUserSession)) {
                 event.session(offlineUserSession);
                 return offlineUserSession;
@@ -225,7 +223,7 @@ public class UserInfoEndpoint {
 
         if (userSession == null && offlineUserSession == null) {
             event.error(Errors.USER_SESSION_NOT_FOUND);
-            throw new ErrorResponseException(OAuthErrorException.INVALID_REQUEST, "User session not found", Response.Status.BAD_REQUEST);
+            throw new ErrorResponseException(OAuthErrorException.INVALID_REQUEST, "User session not found or doesn't have client attached on it", Response.Status.UNAUTHORIZED);
         }
 
         if (userSession != null) {
 
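findValidSession now resolves the session in the same order as the other cross-DC lookups: the regular session first, the offline session as a fallback, with the client attachment enforced inside the lookup itself. A condensed sketch of that resolution order; the method and variable names are illustrative only:

    // Illustrative only: resolve a user session for the given client, preferring the regular
    // (online) session and falling back to the offline one, as findValidSession does above.
    private UserSessionModel resolveSessionForClient(KeycloakSession session, RealmModel realm,
                                                     String sessionId, String clientUuid) {
        UserSessionCrossDCManager crossDc = new UserSessionCrossDCManager(session);

        UserSessionModel online = crossDc.getUserSessionWithClient(realm, sessionId, false, clientUuid);
        if (AuthenticationManager.isSessionValid(realm, online)) {
            return online;
        }

        UserSessionModel offline = crossDc.getUserSessionWithClient(realm, sessionId, true, clientUuid);
        if (AuthenticationManager.isOfflineSessionValid(realm, offline)) {
            return offline;
        }

        // Caller decides how to report "session not found or client not attached"
        return null;
    }
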
@@ -24,6 +24,7 @@ import org.keycloak.models.ClientModel;
 import org.keycloak.models.KeycloakSession;
 import org.keycloak.models.RealmModel;
 import org.keycloak.models.UserSessionModel;
+import org.keycloak.services.managers.UserSessionCrossDCManager;
 
 /**
  * @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
@@ -54,7 +55,9 @@ public class SamlSessionUtils {
             return null;
         }
 
-        UserSessionModel userSession = session.sessions().getUserSession(realm, parts[0]);
+        String userSessionId = parts[0];
+        String clientUUID = parts[1];
+        UserSessionModel userSession = new UserSessionCrossDCManager(session).getUserSessionWithClient(realm, userSessionId, false, clientUUID);
         if (userSession == null) {
             return null;
         }
 
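On the SAML side the session index already carries both the user-session id and the client UUID, so the same client-aware lookup applies: it only succeeds when the referenced client session is attached, locally or via the remote cache. A small sketch, assuming the index has already been split into its parts as in the surrounding code:

    // Illustrative: given the two parts of a SAML session index, fetch the user session only if
    // the client session for clientUUID is attached (possibly by consulting the remote cache).
    private AuthenticatedClientSessionModel lookupSamlClientSession(KeycloakSession session, RealmModel realm,
                                                                    String userSessionId, String clientUUID) {
        UserSessionModel userSession = new UserSessionCrossDCManager(session)
                .getUserSessionWithClient(realm, userSessionId, false, clientUUID);
        if (userSession == null) {
            return null;
        }
        return userSession.getAuthenticatedClientSessions().get(clientUUID);
    }
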
@@ -192,16 +192,12 @@ public class AuthenticationManager {
 
     // Logout all clientSessions of this user and client
     public static void backchannelUserFromClient(KeycloakSession session, RealmModel realm, UserModel user, ClientModel client, UriInfo uriInfo, HttpHeaders headers) {
-        String clientId = client.getId();
-
         List<UserSessionModel> userSessions = session.sessions().getUserSessions(realm, user);
         for (UserSessionModel userSession : userSessions) {
-            Collection<AuthenticatedClientSessionModel> clientSessions = userSession.getAuthenticatedClientSessions().values();
-            for (AuthenticatedClientSessionModel clientSession : clientSessions) {
-                if (clientSession.getClient().getId().equals(clientId)) {
-                    AuthenticationManager.backchannelLogoutClientSession(session, realm, clientSession, userSession, uriInfo, headers);
-                    TokenManager.dettachClientSession(session.sessions(), realm, clientSession);
-                }
-            }
+            AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(client.getId());
+            if (clientSession != null) {
+                AuthenticationManager.backchannelLogoutClientSession(session, realm, clientSession, userSession, uriInfo, headers);
+                TokenManager.dettachClientSession(session.sessions(), realm, clientSession);
+            }
         }
     }
 
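getAuthenticatedClientSessions() is keyed by client UUID, so finding one client's session is a single map access per user session instead of a scan over every client session. The same pattern can be used to collect that client's sessions across all of a user's sessions, as in this illustrative helper (it mirrors the logoutUserFromClient code removed further below; java.util imports assumed):

    // Illustrative helper: collect the AuthenticatedClientSessionModel of a single client from
    // all user sessions of the given user, using the per-client map lookup shown above.
    private static List<AuthenticatedClientSessionModel> clientSessionsOf(KeycloakSession session, RealmModel realm,
                                                                          UserModel user, ClientModel client) {
        List<AuthenticatedClientSessionModel> result = new LinkedList<>();
        for (UserSessionModel userSession : session.sessions().getUserSessions(realm, user)) {
            AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(client.getId());
            if (clientSession != null) {
                result.add(clientSession);
            }
        }
        return result;
    }
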
@@ -21,7 +21,6 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.jboss.logging.Logger;
 import org.keycloak.OAuth2Constants;
 import org.keycloak.models.AuthenticatedClientSessionModel;
 import org.keycloak.models.KeycloakSession;
 import org.keycloak.models.RealmModel;
@@ -120,9 +119,13 @@ class CodeGenerateUtil {
             String userSessionId = parts[2];
             String clientUUID = parts[3];
 
-            UserSessionModel userSession = session.sessions().getUserSession(realm, userSessionId);
+            UserSessionModel userSession = new UserSessionCrossDCManager(session).getUserSessionWithClientAndCodeToTokenAction(realm, userSessionId, clientUUID);
             if (userSession == null) {
-                return null;
+                // TODO:mposolda Temporary workaround needed to track if code is invalid or was already used. Will be good to remove once used OAuth codes are tracked through one-time cache
+                userSession = session.sessions().getUserSession(realm, userSessionId);
+                if (userSession == null) {
+                    return null;
+                }
             }
 
             return userSession.getAuthenticatedClientSessions().get(clientUUID);
 
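The code-to-token lookup is stricter: the user session only qualifies when the client session is attached and still parked in the CODE_TO_TOKEN action. The follow-up plain getUserSession call merely distinguishes "session is gone" from "session exists but the code was already consumed", as the TODO notes. The predicate behind the stricter lookup boils down to the following restatement (illustration only; java.util.function.Predicate assumed, clientUUID taken from the surrounding scope):

    // Illustrative predicate: the session qualifies only when the client session exists and is
    // still waiting in the CODE_TO_TOKEN action.
    Predicate<UserSessionModel> codeToTokenReady = userSession -> {
        AuthenticatedClientSessionModel authSession = userSession.getAuthenticatedClientSessions().get(clientUUID);
        return authSession != null
                && CommonClientSessionModel.Action.CODE_TO_TOKEN.toString().equals(authSession.getAction());
    };
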
@@ -135,20 +135,6 @@ public class ResourceAdminManager {
         }
     }
 
-    public void logoutUserFromClient(URI requestUri, RealmModel realm, ClientModel resource, UserModel user) {
-        List<UserSessionModel> userSessions = session.sessions().getUserSessions(realm, user);
-        List<AuthenticatedClientSessionModel> ourAppClientSessions = new LinkedList<>();
-        if (userSessions != null) {
-            for (UserSessionModel userSession : userSessions) {
-                AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessions().get(resource.getId());
-                if (clientSession != null) {
-                    ourAppClientSessions.add(clientSession);
-                }
-            }
-        }
-
-        logoutClientSessions(requestUri, realm, resource, ourAppClientSessions);
-    }
-
     public boolean logoutClientSession(URI requestUri, RealmModel realm, ClientModel resource, AuthenticatedClientSessionModel clientSession) {
         return logoutClientSessions(requestUri, realm, resource, Arrays.asList(clientSession));
 
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Red Hat, Inc. and/or its affiliates
+ * and other contributors as indicated by the @author tags.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.keycloak.services.managers;
+
+import java.util.Map;
+
+import org.jboss.logging.Logger;
+import org.keycloak.models.AuthenticatedClientSessionModel;
+import org.keycloak.models.KeycloakSession;
+import org.keycloak.models.RealmModel;
+import org.keycloak.models.UserSessionModel;
+import org.keycloak.sessions.CommonClientSessionModel;
+
+/**
+ * @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
+ */
+public class UserSessionCrossDCManager {
+
+    private static final Logger logger = Logger.getLogger(UserSessionCrossDCManager.class);
+
+    private final KeycloakSession kcSession;
+
+    public UserSessionCrossDCManager(KeycloakSession session) {
+        this.kcSession = session;
+    }
+
+
+    // get userSession if it has "authenticatedClientSession" of specified client attached to it. Otherwise download it from remoteCache
+    public UserSessionModel getUserSessionWithClient(RealmModel realm, String id, boolean offline, String clientUUID) {
+        return kcSession.sessions().getUserSessionWithPredicate(realm, id, offline, userSession -> userSession.getAuthenticatedClientSessions().containsKey(clientUUID));
+    }
+
+
+    // get userSession if it has "authenticatedClientSession" of specified client attached to it and there is "CODE_TO_TOKEN" action. Otherwise download it from remoteCache
+    // TODO Probably remove this method once AuthenticatedClientSession.getAction is removed and information is moved to OAuth code JWT instead
+    public UserSessionModel getUserSessionWithClientAndCodeToTokenAction(RealmModel realm, String id, String clientUUID) {
+
+        return kcSession.sessions().getUserSessionWithPredicate(realm, id, false, (UserSessionModel userSession) -> {
+
+            Map<String, AuthenticatedClientSessionModel> authSessions = userSession.getAuthenticatedClientSessions();
+            if (!authSessions.containsKey(clientUUID)) {
+                return false;
+            }
+
+            AuthenticatedClientSessionModel authSession = authSessions.get(clientUUID);
+            return CommonClientSessionModel.Action.CODE_TO_TOKEN.toString().equals(authSession.getAction());
+
+        });
+    }
+}
 
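UserSessionCrossDCManager is a thin wrapper over UserSessionProvider.getUserSessionWithPredicate: the predicate states what the caller needs from the session, and the provider may satisfy it from the local cache or by downloading the entry from the remote cache of the other data center. A hedged usage sketch; the surrounding class and method names are invented for illustration:

    import org.keycloak.models.AuthenticatedClientSessionModel;
    import org.keycloak.models.ClientModel;
    import org.keycloak.models.KeycloakSession;
    import org.keycloak.models.RealmModel;
    import org.keycloak.models.UserSessionModel;
    import org.keycloak.services.managers.UserSessionCrossDCManager;

    // Hypothetical caller, for illustration only.
    class CrossDcLookupExample {

        // Returns the client session of the given client, or null if no user session with that
        // client attached can be found either locally or in the remote cache.
        AuthenticatedClientSessionModel findClientSession(KeycloakSession session, RealmModel realm,
                                                          String userSessionId, ClientModel client, boolean offline) {
            UserSessionModel userSession = new UserSessionCrossDCManager(session)
                    .getUserSessionWithClient(realm, userSessionId, offline, client.getId());
            if (userSession == null) {
                return null;
            }
            return userSession.getAuthenticatedClientSessions().get(client.getId());
        }
    }
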
@@ -171,7 +171,7 @@ public class UserStorageSyncManager {
 
         }
         UserStorageProviderClusterEvent event = UserStorageProviderClusterEvent.createEvent(removed, realm.getId(), provider);
-        session.getProvider(ClusterProvider.class).notify(USER_STORAGE_TASK_KEY, event, false);
+        session.getProvider(ClusterProvider.class).notify(USER_STORAGE_TASK_KEY, event, false, ClusterProvider.DCNotify.ALL_DCS);
     }
 
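ClusterProvider.notify takes an explicit DCNotify argument here, and user-storage sync events are broadcast to all data centers so each site reacts to the provider change. A minimal sketch of the call shape with the new parameter; the task key and event value are placeholders:

    // Placeholder task key and event object, shown only to illustrate the notify(..) overload
    // with the DCNotify parameter.
    ClusterProvider cluster = session.getProvider(ClusterProvider.class);
    cluster.notify("my-task-key", someClusterEvent, false, ClusterProvider.DCNotify.ALL_DCS);
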
Some files were not shown because too many files have changed in this diff.