Add Multi-AZ Aurora DB to CI store-integration-tests

Closes #26730

Signed-off-by: Ryan Emerson <remerson@redhat.com>
Ryan Emerson 2024-02-14 15:51:08 +00:00 committed by GitHub
parent 9f1da8dbf2
commit 67f6f2f657
32 changed files with 889 additions and 3 deletions

@@ -0,0 +1,32 @@
name: Create Aurora Database
description: Create AWS Aurora Database
inputs:
name:
description: 'The name of the Aurora DB cluster to deploy'
required: true
region:
description: 'The AWS region used to host the Aurora DB'
required: true
password:
description: 'The master password of the Aurora DB cluster'
required: false
outputs:
endpoint:
description: 'The Endpoint URL for Aurora clients to connect to'
value: ${{ steps.create.outputs.endpoint }}
runs:
using: "composite"
steps:
- id: create
shell: bash
run: |
source ./aurora_create.sh
echo "endpoint=${AURORA_ENDPOINT}" >> $GITHUB_OUTPUT
working-directory: .github/scripts/aws/rds
env:
AURORA_CLUSTER: ${{ inputs.name }}
AURORA_PASSWORD: ${{ inputs.password }}
AURORA_REGION: ${{ inputs.region }}

@@ -0,0 +1,21 @@
name: Delete Aurora Database
description: Delete AWS Aurora Database
inputs:
name:
description: 'The name of the Aurora DB cluster to delete'
required: true
region:
description: 'The AWS region hosting the Aurora DB'
required: true
runs:
using: "composite"
steps:
- id: delete
shell: bash
run: ./aurora_delete.sh
working-directory: .github/scripts/aws/rds
env:
AURORA_CLUSTER: ${{ inputs.name }}
AURORA_REGION: ${{ inputs.region }}

.github/scripts/ansible/.gitignore vendored Normal file
@@ -0,0 +1,7 @@
# Ansible
###########
*_inventory.yml
*.pem
ansible.log
files/
env.yml

.github/scripts/ansible/ansible.cfg vendored Normal file
@@ -0,0 +1,8 @@
[defaults]
#log_path = ./ansible.log
host_key_checking=False
transport = ssh
forks = 50
[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes -o StrictHostKeyChecking=no -o IdentitiesOnly=yes

.github/scripts/ansible/aws_ec2.sh vendored Executable file
@@ -0,0 +1,26 @@
#!/usr/bin/env bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")
if [[ "$RUNNER_DEBUG" == "1" ]]; then
set -x
fi
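# Usage: ./aws_ec2.sh <operation> <region> [extra ansible-playbook args]
# Operations: requirements, create, delete, start, stop ("requirements" ignores the region argument).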
OPERATION=$1
REGION=$2
case $OPERATION in
requirements)
ansible-galaxy collection install -r requirements.yml
pip3 install --user ansible boto3 botocore
;;
create|delete|start|stop)
if [ -f "env.yml" ]; then ANSIBLE_CUSTOM_VARS_ARG="-e @env.yml"; fi
CLUSTER_NAME=${CLUSTER_NAME:-"keycloak_$(whoami)"}
ansible-playbook aws_ec2.yml -v -e "region=$REGION" -e "operation=$OPERATION" -e "cluster_name=$CLUSTER_NAME" $ANSIBLE_CUSTOM_VARS_ARG "${@:3}"
;;
*)
echo "Invalid option!"
echo "Available operations: requirements, create, delete, start, stop."
;;
esac

.github/scripts/ansible/aws_ec2.yml vendored Normal file
@@ -0,0 +1,3 @@
- hosts: localhost
connection: local
roles: [aws_ec2]

.github/scripts/ansible/keycloak.yml vendored Normal file
@@ -0,0 +1,2 @@
- hosts: keycloak
roles: [keycloak_ec2_installer]

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")
if [[ "$RUNNER_DEBUG" == "1" ]]; then
set -x
fi
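# Usage: ./keycloak_ec2_installer.sh <region> <path-to-keycloak-src-zip>
# Installs the given Keycloak source archive on the hosts listed in ${CLUSTER_NAME}_${REGION}_inventory.yml.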
REGION=$1
KEYCLOAK_SRC=$2
CLUSTER_NAME=${CLUSTER_NAME:-"keycloak_$(whoami)"}
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml keycloak.yml \
-e "keycloak_src=\"${KEYCLOAK_SRC}\""

.github/scripts/ansible/mvn.yml vendored Normal file
@@ -0,0 +1,2 @@
- hosts: keycloak
roles: [mvn_ec2_runner]

.github/scripts/ansible/mvn_ec2_runner.sh vendored Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")
if [[ "$RUNNER_DEBUG" == "1" ]]; then
set -x
fi
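# Usage: ./mvn_ec2_runner.sh <region> <arguments passed to ./mvnw on the remote hosts>
# Example: ./mvn_ec2_runner.sh eu-west-1 "clean install -DskipTests"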
REGION=$1
MVN_PARAMS=${@:2}
CLUSTER_NAME=${CLUSTER_NAME:-"keycloak_$(whoami)"}
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml mvn.yml \
-e "mvn_params=\"${MVN_PARAMS}\""

@@ -0,0 +1,3 @@
collections:
- name: amazon.aws
version: 6.0.0

@@ -0,0 +1,91 @@
# Ansible Role `aws_ec2`
Ansible role for creating, deleting, stopping and starting AWS EC2 instances
for running keycloak tests.
## Prerequisites
The role requires the Ansible Collection `amazon.aws`, version `6.0.0` or higher.
The role assumes the user is authenticated to use the AWS CLI, i.e. that the authentication
variables `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` are set in the environment.
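For example, a minimal setup of the control machine might look like this (placeholder credentials; the dependency commands mirror `./aws_ec2.sh requirements`):
```
pip3 install --user ansible boto3 botocore
ansible-galaxy collection install -r requirements.yml
export AWS_ACCESS_KEY=<access-key-id>        # placeholder
export AWS_SECRET_KEY=<secret-access-key>    # placeholder
```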
## Parameters
- `region`: AWS region for the resources to be created in.
- `cluster_name`: Unique name of the instance cluster within the region. Defaults to `keycloak_{{ cluster_identifier }}`.
- `cluster_identifier`: Identifier to distinguish multiple clusters within the region. Defaults to `${USER}`.
- `cluster_size`: Number of EC2 instances to be created.
- `ami_name`: Name of the AMI image to be used for spawning instances.
- `instance_type`: [AWS instance type](https://aws.amazon.com/ec2/instance-types/).
- `instance_volume_size`: Size of instance storage device.
- `instance_device`: Path to Linux storage device.
For defaults see `defaults/main.yml`.
## Example Playbook
Example playbook `aws_ec2.yml`:
```
- hosts: localhost
  connection: local
  roles: [aws_ec2]
```
## Create Instances
Using the example playbook run:
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=create
```
Replace `<REGION>` with an actual value, e.g. `us-west-1`.
Optionally, you can override other parameters with `-e PARAMETER=VALUE` or `-e @PARAMS.yml`.
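For example, to create a cluster of three larger instances (illustrative values, using the parameters listed above):
```
ansible-playbook aws_ec2.yml -e region=us-west-1 -e operation=create \
  -e cluster_size=3 -e instance_type=t3.xlarge
```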
This operation will create the following 2 files:
- `{{ cluster_name }}_{{ region }}.pem` - private SSH key.
- `{{ cluster_name }}_{{ region }}_inventory.yml` - an Ansible host inventory file.
```
keycloak:
children:
"{{ cluster_name }}_{{ region }}":
vars:
ansible_user: ec2-user
ansible_become: yes
ansible_ssh_private_key_file: "{{ cluster_name }}_{{ region }}.pem"
hosts:
host-1-ip-address:
host-2-ip-address:
...
```
Notice that the created hosts will be included in Ansible group `keycloak`
and subgroup `{{ cluster_name }}_{{ region }}`.
## Stop and Start Instances
Using the example playbook run:
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=stop
```
After the instances are stopped, their public IP addresses are de-allocated.
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=start
```
After the instances are started again, the role re-creates the host inventory file with the updated public IP addresses.
## Delete Instances
Using the example playbook run:
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=delete
```
This removes the created AWS resources and deletes the host inventory file and private key.

@@ -0,0 +1,13 @@
cluster_identifier: "{{ lookup('env', 'USER') }}"
cluster_name: "keycloak_{{ cluster_identifier }}"
cluster_size: 1
cidr_ip: "{{ control_host_ip.stdout }}/32"
ami_name: RHEL-8.8.0_HVM-20230503-x86_64-54-Hourly2-GP2
instance_type: t3.large
instance_volume_size: 20
instance_device: /dev/sda1
no_log_sensitive: true

@@ -0,0 +1,68 @@
- name: Get Ansible Control Host's public IP
shell: curl -ks --ipv4 https://ifconfig.me
register: control_host_ip
no_log: "{{ no_log_sensitive }}"
- debug: var=cidr_ip
- name: Create Security Group
amazon.aws.ec2_group:
state: present
region: '{{ region }}'
name: '{{ cluster_name }}'
description: '{{ cluster_name }}'
rules:
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: '{{cidr_ip}}'
register: group
no_log: "{{ no_log_sensitive }}"
- name: Create Key
amazon.aws.ec2_key:
state: present
region: '{{ region }}'
name: '{{ cluster_name }}'
register: key
no_log: "{{ no_log_sensitive }}"
- name: Save Private Key on Ansible Control Machine
when: key.changed
copy:
content: '{{ key.key.private_key }}'
dest: '{{ cluster_name }}_{{ region }}.pem'
mode: 0600
no_log: "{{ no_log_sensitive }}"
- name: Look up AMI '{{ ami_name }}'
amazon.aws.ec2_ami_info:
region: '{{ region}}'
filters:
name: '{{ ami_name }}'
register: ami_info
- name: Create {{ cluster_size }} EC2 Instances
amazon.aws.ec2_instance:
state: started
region: '{{ region }}'
name: "{{ cluster_name }}"
exact_count: "{{ cluster_size }}"
instance_type: '{{ instance_type }}'
image_id: '{{ ami_info.images[0].image_id }}'
key_name: '{{ cluster_name }}'
security_group: '{{ group.group_id }}'
network:
assign_public_ip: yes
volumes:
- device_name: '{{ instance_device }}'
ebs:
volume_size: '{{ instance_volume_size }}'
delete_on_termination: true
register: instances
no_log: "{{ no_log_sensitive }}"
- name: Create Inventory File
template:
src: inventory.yml.j2
dest: '{{ cluster_name }}_{{ region }}_inventory.yml'

@@ -0,0 +1,26 @@
- name: 'Delete EC2 instances'
amazon.aws.ec2_instance:
state: absent
region: '{{ region }}'
filters:
"tag:Name": '{{ cluster_name }}*'
- name: 'Delete EC2 security group'
amazon.aws.ec2_group:
state: absent
region: '{{ region }}'
name: '{{ cluster_name }}'
- name: 'Delete Key'
amazon.aws.ec2_key:
state: absent
region: '{{ region }}'
name: '{{ cluster_name }}'
- name: 'Delete inventory, key, and log'
file:
state: absent
path: '{{ item }}'
with_items:
- '{{ cluster_name }}_{{ region }}_inventory.yml'
- '{{ cluster_name }}_{{ region }}.pem'

@@ -0,0 +1,12 @@
- debug: var=cluster_identifier
- debug: var=region
- debug: var=cluster_name
- include_tasks: create-resources.yml
when: operation == "create"
- include_tasks: manage-instances.yml
when: operation == "start" or operation == "stop"
- include_tasks: delete-resources.yml
when: operation == "delete"

@@ -0,0 +1,26 @@
# Start or Stop Instances
- name: "{{ operation[0]|upper }}{{ operation[1:] }} Instances"
amazon.aws.ec2_instance:
state: '{{ "stopped" if operation == "stop" else "started" }}'
region: '{{ region }}'
filters:
"tag:Name": '{{ cluster_name }}*'
instance-state-name: ['running', 'stopped', 'stopping']
no_log: "{{ no_log_sensitive }}"
- when: operation == "start"
block:
# When starting instances via `ec2_instance` module sometimes the `public_ip_address` is missing in the result.
# Added additional `ec2_instance_info` step to work around the issue.
- name: Get Instance Information
amazon.aws.ec2_instance_info:
region: '{{ region }}'
filters:
"tag:Name": '{{ cluster_name }}*'
instance-state-name: ['running']
register: instances
no_log: "{{ no_log_sensitive }}"
- name: Recreate Inventory File
template:
src: inventory.yml.j2
dest: '{{ cluster_name }}_{{ region }}_inventory.yml'

@@ -0,0 +1,11 @@
keycloak:
children:
{{ cluster_name }}_{{ region | replace('-','_') }}:
vars:
ansible_user: ec2-user
ansible_become: yes
ansible_ssh_private_key_file: {{ cluster_name }}_{{ region }}.pem
hosts:
{% for instance in instances.instances %}
{{ instance.public_ip_address }}:
{% endfor %}

@@ -0,0 +1,35 @@
# Ansible Role `keycloak_ec2_installer`
Ansible role for installing Keycloak sources and build dependencies on remote nodes.
The role assumes the presence of a host inventory file and a matching SSH key granting "sudoer" access to the hosts.
The hosts are expected to be included in the `keycloak` group.
## Parameters
See `defaults/main.yml` for default values.
### Execution
- `keycloak_src`: Path to a local `*.zip` file containing the Keycloak src
### Other
- `update_system_packages`: Whether to update the system packages. Defaults to `no`.
- `install_java`: Whether to install OpenJDK on the system. Defaults to `yes`.
- `java_version`: Version of OpenJDK to be installed. Defaults to `17`.
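These parameters can be overridden like any other Ansible variable; for example, to also refresh the system packages during the install (illustrative inventory name):
```
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml keycloak.yml \
  -e "keycloak_src=/tmp/keycloak.zip" -e update_system_packages=yes
```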
## Example Playbook
An example playbook `keycloak.yml` that applies the role to hosts in the `keycloak` group:
```
- hosts: keycloak
  roles: [keycloak_ec2_installer]
```
## Run the Playbook
Run:
```
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml keycloak.yml \
-e "keycloak_src=\"/tmp/keycloak.zip\""
```

@@ -0,0 +1,7 @@
# This should match the user in the *_inventory.yml
ansible_ssh_user: ec2-user
# Workspace on the remote hosts
kc_home: /opt/keycloak
update_system_packages: no
install_java: yes
java_version: 17

@@ -0,0 +1,29 @@
- name: Update system packages on the remote hosts
when: update_system_packages
package:
name: "*"
state: latest
- name: Install Java {{ java_version }} packages on the remote hosts
when: install_java
package:
name:
- "java-{{ java_version }}-openjdk"
- "java-{{ java_version }}-openjdk-devel"
state: present
- name: Install dependencies on the remote hosts
package: name={{item}} state=present
with_items:
- unzip
- name: Create Keycloak src dir
file:
path: "{{ kc_home }}"
state: directory
- name: Install Keycloak src on the remote hosts
unarchive:
src: "{{ keycloak_src }}"
dest: "{{ kc_home }}"
owner: "{{ ansible_ssh_user }}"

@@ -0,0 +1,3 @@
- include_tasks: install.yml
vars:
ansible_become: yes

@@ -0,0 +1,33 @@
# Ansible Role `mvn_ec2_runner`
Ansible role for executing `mvn` commands against a Keycloak src on a remote node.
The role assumes the presence of a host inventory file and a matching SSH key granting "sudoer" access to the hosts.
The hosts are expected to be included in the `keycloak` group.
## Parameters
See `defaults/main.yml` for default values.
### Execution
- `mvn_params`: The arguments passed to `./mvnw` on the remote nodes, e.g. `clean install`.
### Other
- `kc_home`: Location of the Keycloak src on the remote node.
## Example Playbook
An example playbook `mvn.yml` that applies the role to hosts in the `keycloak` group:
```
- hosts: keycloak
  roles: [mvn_ec2_runner]
```
## Run the Playbook
Run:
```
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml mvn.yml \
  -e "mvn_params=\"clean install\""
```
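The repository also provides a wrapper script, `.github/scripts/ansible/mvn_ec2_runner.sh`, which derives the inventory file from `CLUSTER_NAME` and the region and forwards its remaining arguments as `mvn_params`. An equivalent invocation (illustrative cluster name and region):
```
CLUSTER_NAME=keycloak_ci ./mvn_ec2_runner.sh eu-west-1 "clean install"
```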

@@ -0,0 +1,5 @@
# Workspace on the localhost
local_workspace: files/keycloak
# Workspace on the remote hosts
kc_home: /opt/keycloak

@@ -0,0 +1 @@
- include_tasks: run.yml

@@ -0,0 +1,25 @@
- name: Initialization
run_once: yes
block:
- debug: msg="Variable `mvn_params` must be set."
failed_when: mvn_params == ""
- set_fact: local_results_dir="{{ local_workspace }}/results/{{ '%Y%m%d%H%M%S' | strftime }}"
- debug: var=local_results_dir
- name: Cleanup Previous Runs
# Kill any currently running Java process from a previous (possibly aborted) run before starting the next.
shell: |
killall java
ignore_errors: yes
- name: Run mvn command on the remote hosts
  shell: |
    cd {{ kc_home }}
    ./mvnw {{ mvn_params }}
  # An mvn run can take many hours; use asynchronous polling so the task survives a dropped SSH connection.
  async: 86400
  poll: 10
  register: result
- debug: var=result

.github/scripts/aws/rds/aurora_common.sh vendored Executable file
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e
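# Shared environment defaults for the aurora_create.sh and aurora_delete.sh scripts.
# Callers must set AURORA_CLUSTER and AURORA_REGION; everything else defaults below.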
function requiredEnv() {
for ENV in $@; do
if [ -z "${!ENV}" ]; then
echo "${ENV} variable must be set"
exit 1
fi
done
}
requiredEnv AURORA_CLUSTER AURORA_REGION
export AURORA_ENGINE=${AURORA_ENGINE:-"aurora-postgresql"}
export AURORA_ENGINE_VERSION=${AURORA_ENGINE_VERSION:-"15.3"}
export AURORA_INSTANCES=${AURORA_INSTANCES:-"2"}
export AURORA_INSTANCE_CLASS=${AURORA_INSTANCE_CLASS:-"db.t4g.large"}
export AURORA_PASSWORD=${AURORA_PASSWORD:-"secret99"}
export AURORA_SECURITY_GROUP_NAME=${AURORA_SECURITY_GROUP_NAME:-"${AURORA_CLUSTER}-security-group"}
export AURORA_SUBNET_A_CIDR=${AURORA_SUBNET_A_CIDR:-"192.168.0.0/19"}
export AURORA_SUBNET_B_CIDR=${AURORA_SUBNET_B_CIDR:-"192.168.32.0/19"}
export AURORA_SUBNET_GROUP_NAME=${AURORA_SUBNET_GROUP_NAME:-"${AURORA_CLUSTER}-subnet-group"}
export AURORA_VPC_CIDR=${AURORA_VPC_CIDR:-"192.168.0.0/16"}
export AURORA_USERNAME=${AURORA_USERNAME:-"keycloak"}
export AWS_REGION=${AWS_REGION:-${AURORA_REGION}}
export AWS_PAGER=""

.github/scripts/aws/rds/aurora_create.sh vendored Executable file
@@ -0,0 +1,135 @@
#!/usr/bin/env bash
set -e
if [[ "$RUNNER_DEBUG" == "1" ]]; then
set -x
fi
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source ${SCRIPT_DIR}/aurora_common.sh
EXISTING_INSTANCES=$(aws rds describe-db-instances \
--query "DBInstances[?starts_with(DBInstanceIdentifier, '${AURORA_CLUSTER}')].DBInstanceIdentifier" \
--output text
)
if [ -n "${EXISTING_INSTANCES}" ]; then
echo "Aurora instances '${EXISTING_INSTANCES}' already exist in the '${AWS_REGION}' region"
exit 1
fi
# Create the Aurora VPC
AURORA_VPC=$(aws ec2 create-vpc \
--cidr-block ${AURORA_VPC_CIDR} \
--tag-specifications "ResourceType=vpc, Tags=[{Key=AuroraCluster,Value=${AURORA_CLUSTER}},{Key=Name,Value=Aurora Cluster ${AURORA_CLUSTER}}]" \
--query "Vpc.VpcId" \
--output text
)
# Each region may have different availability zones, so ensure that the AZs we use actually exist
IFS=' ' read -a AZS <<< "$(aws ec2 describe-availability-zones --region ${AURORA_REGION} --query "AvailabilityZones[].ZoneName" --output text)"
# Create the Aurora Subnets
SUBNET_A=$(aws ec2 create-subnet \
--availability-zone "${AZS[0]}" \
--vpc-id ${AURORA_VPC} \
--cidr-block ${AURORA_SUBNET_A_CIDR} \
--query "Subnet.SubnetId" \
--output text
)
SUBNET_B=$(aws ec2 create-subnet \
--availability-zone "${AZS[1]}" \
--vpc-id ${AURORA_VPC} \
--cidr-block ${AURORA_SUBNET_B_CIDR} \
--query "Subnet.SubnetId" \
--output text
)
AURORA_PUBLIC_ROUTE_TABLE_ID=$(aws ec2 describe-route-tables \
--filters Name=vpc-id,Values=${AURORA_VPC} \
--query "RouteTables[0].RouteTableId" \
--output text
)
aws ec2 associate-route-table \
--route-table-id ${AURORA_PUBLIC_ROUTE_TABLE_ID} \
--subnet-id ${SUBNET_A}
aws ec2 associate-route-table \
--route-table-id ${AURORA_PUBLIC_ROUTE_TABLE_ID} \
--subnet-id ${SUBNET_B}
# Create Aurora Subnet Group
aws rds create-db-subnet-group \
--db-subnet-group-name ${AURORA_SUBNET_GROUP_NAME} \
--db-subnet-group-description "Aurora DB Subnet Group" \
--subnet-ids ${SUBNET_A} ${SUBNET_B}
# Create an Aurora VPC Security Group
AURORA_SECURITY_GROUP_ID=$(aws ec2 create-security-group \
--group-name ${AURORA_SECURITY_GROUP_NAME} \
--description "Aurora DB Security Group" \
--vpc-id ${AURORA_VPC} \
--query "GroupId" \
--output text
)
# Make the Aurora endpoint accessible outside the VPC
## Create Internet gateway
INTERNET_GATEWAY=$(aws ec2 create-internet-gateway \
--tag-specifications "ResourceType=internet-gateway, Tags=[{Key=AuroraCluster,Value=${AURORA_CLUSTER}},{Key=Name,Value=Aurora Cluster ${AURORA_CLUSTER}}]" \
--query "InternetGateway.InternetGatewayId" \
--output text
)
aws ec2 attach-internet-gateway \
--internet-gateway-id ${INTERNET_GATEWAY} \
--vpc-id ${AURORA_VPC}
aws ec2 create-route \
--route-table-id ${AURORA_PUBLIC_ROUTE_TABLE_ID} \
--destination-cidr-block 0.0.0.0/0 \
--gateway-id ${INTERNET_GATEWAY}
## Enable DNS hostnames required for publicly accessible Aurora instances
aws ec2 modify-vpc-attribute \
--vpc-id ${AURORA_VPC} \
--enable-dns-hostnames
## Ensure the Postgres port is accessible outside the VPC
aws ec2 authorize-security-group-ingress \
--group-id ${AURORA_SECURITY_GROUP_ID} \
--ip-permissions "FromPort=5432,ToPort=5432,IpProtocol=tcp,IpRanges=[{CidrIp=0.0.0.0/0}]"
# Create the Aurora DB cluster and instance
aws rds create-db-cluster \
--db-cluster-identifier ${AURORA_CLUSTER} \
--database-name keycloak \
--engine ${AURORA_ENGINE} \
--engine-version ${AURORA_ENGINE_VERSION} \
--master-username ${AURORA_USERNAME} \
--master-user-password ${AURORA_PASSWORD} \
--vpc-security-group-ids ${AURORA_SECURITY_GROUP_ID} \
--db-subnet-group-name ${AURORA_SUBNET_GROUP_NAME} \
--tags "Key=keepalive" # Add keepalive tag to prevent keycloak-benchmark reaper from removing DB during nightly runs
# For now only two AZs in each region are supported due to the two subnets created above
for i in $( seq ${AURORA_INSTANCES} ); do
aws rds create-db-instance \
--db-cluster-identifier ${AURORA_CLUSTER} \
--db-instance-identifier "${AURORA_CLUSTER}-instance-${i}" \
--db-instance-class ${AURORA_INSTANCE_CLASS} \
--engine ${AURORA_ENGINE} \
--availability-zone "${AZS[$(((i - 1) % ${#AZS[@]}))]}" \
--publicly-accessible
done
for i in $( seq ${AURORA_INSTANCES} ); do
aws rds wait db-instance-available --db-instance-identifier "${AURORA_CLUSTER}-instance-${i}"
done
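# Expose the cluster endpoint so callers that source this script (e.g. the aurora-create-database action) can read it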
export AURORA_ENDPOINT=$(aws rds describe-db-clusters \
--db-cluster-identifier ${AURORA_CLUSTER} \
--query "DBClusters[*].Endpoint" \
--output text
)

.github/scripts/aws/rds/aurora_delete.sh vendored Executable file
@@ -0,0 +1,79 @@
#!/usr/bin/env bash
set -e
if [[ "$RUNNER_DEBUG" == "1" ]]; then
set -x
fi
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source ${SCRIPT_DIR}/aurora_common.sh
AURORA_VPC=$(aws ec2 describe-vpcs \
--filters "Name=tag:AuroraCluster,Values=${AURORA_CLUSTER}" \
--query "Vpcs[*].VpcId" \
--output text
)
# Delete the Aurora DB cluster and instances
for i in $( aws rds describe-db-clusters --db-cluster-identifier ${AURORA_CLUSTER} --query "DBClusters[0].DBClusterMembers[].DBInstanceIdentifier" --output text ); do
echo "Deleting Aurora DB instance ${i}"
aws rds delete-db-instance --db-instance-identifier "${i}" --skip-final-snapshot || true
done
aws rds delete-db-cluster \
--db-cluster-identifier ${AURORA_CLUSTER} \
--skip-final-snapshot \
|| true
for i in $( aws rds describe-db-clusters --db-cluster-identifier ${AURORA_CLUSTER} --query "DBClusters[0].DBClusterMembers[].DBInstanceIdentifier" --output text ); do
aws rds wait db-instance-deleted --db-instance-identifier "${i}"
done
aws rds wait db-cluster-deleted --db-cluster-identifier ${AURORA_CLUSTER} || true
# Delete the Aurora subnet group
aws rds delete-db-subnet-group --db-subnet-group-name ${AURORA_SUBNET_GROUP_NAME} || true
# Delete the Aurora subnets
AURORA_SUBNETS=$(aws ec2 describe-subnets \
--filters "Name=vpc-id,Values=${AURORA_VPC}" \
--query "Subnets[*].SubnetId" \
--output text
)
for AURORA_SUBNET in ${AURORA_SUBNETS}; do
aws ec2 delete-subnet --subnet-id ${AURORA_SUBNET}
done
# Delete the Aurora VPC Security Group
AURORA_SECURITY_GROUP_ID=$(aws ec2 describe-security-groups \
--filters "Name=vpc-id,Values=${AURORA_VPC}" "Name=group-name,Values=${AURORA_SECURITY_GROUP_NAME}" \
--query "SecurityGroups[*].GroupId" \
--output text
)
if [ -n "${AURORA_SECURITY_GROUP_ID}" ]; then
aws ec2 delete-security-group --group-id ${AURORA_SECURITY_GROUP_ID} --region ${AURORA_REGION}
fi
# Detach the internet gateway from the VPC and remove
INTERNET_GATEWAY=$(aws ec2 describe-internet-gateways \
--filters "Name=tag:AuroraCluster,Values=${AURORA_CLUSTER}" \
--query "InternetGateways[*].InternetGatewayId" \
--output text
)
aws ec2 detach-internet-gateway \
--internet-gateway-id ${INTERNET_GATEWAY} \
--vpc-id ${AURORA_VPC} \
|| true
aws ec2 delete-internet-gateway --internet-gateway-id ${INTERNET_GATEWAY} || true
# Delete the Aurora VPC, retrying up to 20 times in case dependencies are not removed instantly
n=0
until [ "$n" -ge 20 ]
do
aws ec2 delete-vpc --vpc-id ${AURORA_VPC} && break
n=$((n+1))
echo "Unable to remove VPC ${AURORA_VPC}. Attempt ${n}"
sleep 10
done

@@ -31,6 +31,7 @@ jobs:
ci: ${{ steps.conditional.outputs.ci }}
ci-store: ${{ steps.conditional.outputs.ci-store }}
ci-sssd: ${{ steps.conditional.outputs.ci-sssd }}
ci-store-matrix: ${{ steps.conditional-stores.outputs.matrix }}
steps:
- uses: actions/checkout@v4
@@ -39,6 +40,14 @@
with:
token: ${{ secrets.GITHUB_TOKEN }}
- id: conditional-stores
run: |
STORES="postgres, mysql, oracle, mssql, mariadb"
if [ $GITHUB_EVENT_NAME != "pull_request" ]; then
STORES+=", aurora-postgres"
fi
echo "matrix=$(echo $STORES | jq -Rc 'split(", ")')" >> $GITHUB_OUTPUT
build:
name: Build
if: needs.conditional.outputs.ci == 'true'
@@ -299,19 +308,75 @@
needs: [build, conditional]
if: needs.conditional.outputs.ci-store == 'true'
runs-on: ubuntu-latest
timeout-minutes: 75
timeout-minutes: 150
strategy:
matrix:
db: [postgres, mysql, oracle, mssql, mariadb]
db: ${{ fromJson(needs.conditional.outputs.ci-store-matrix) }}
fail-fast: false
steps:
- uses: actions/checkout@v4
- id: aurora-init
name: Initialize Aurora environment
if: ${{ matrix.db == 'aurora-postgres' }}
run: |
AWS_REGION=us-east-1
echo "Region: ${AWS_REGION}"
aws configure set aws_access_key_id ${{ secrets.AWS_ACCESS_KEY_ID }}
aws configure set aws_secret_access_key ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws configure set region ${AWS_REGION}
PASS=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 13; echo)
echo "::add-mask::${PASS}"
echo "name=gh-action-$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
echo "password=${PASS}" >> $GITHUB_OUTPUT
echo "region=${AWS_REGION}" >> $GITHUB_OUTPUT
- id: aurora-create
name: Create Aurora DB
if: ${{ matrix.db == 'aurora-postgres' }}
uses: ./.github/actions/aurora-create-database
with:
name: ${{ steps.aurora-init.outputs.name }}
password: ${{ steps.aurora-init.outputs.password }}
region: ${{ steps.aurora-init.outputs.region }}
- id: integration-test-setup
name: Integration test setup
if: ${{ matrix.db != 'aurora-postgres' }}
uses: ./.github/actions/integration-test-setup
- name: Run Aurora tests on EC2
id: aurora-tests
if: ${{ matrix.db == 'aurora-postgres' }}
run: |
PROPS="-Dauth.server.db.host=${{ steps.aurora-create.outputs.endpoint }}"
PROPS+=" -Dkeycloak.connectionsJpa.password=${{ steps.aurora-init.outputs.password }}"
REGION=${{ steps.aurora-init.outputs.region }}
curl --fail-with-body https://truststore.pki.rds.amazonaws.com/${REGION}/${REGION}-bundle.pem -o aws.pem
PROPS+=" -Dkeycloak.connectionsJpa.jdbcParameters=\"?ssl=true&sslmode=verify-ca&sslrootcert=/opt/keycloak/aws.pem\""
TESTS=`testsuite/integration-arquillian/tests/base/testsuites/suite.sh database`
echo "Tests: $TESTS"
git archive --format=zip --output /tmp/keycloak.zip $GITHUB_REF
zip -u /tmp/keycloak.zip aws.pem
cd .github/scripts/ansible
export CLUSTER_NAME=keycloak_$(git rev-parse --short HEAD)
echo "ec2_cluster=${CLUSTER_NAME}" >> $GITHUB_OUTPUT
./aws_ec2.sh requirements
./aws_ec2.sh create ${REGION}
./keycloak_ec2_installer.sh ${REGION} /tmp/keycloak.zip
./mvn_ec2_runner.sh ${REGION} "clean install -DskipTests -Pdistribution"
./mvn_ec2_runner.sh ${REGION} "clean install -DskipTests -pl testsuite/integration-arquillian/servers/auth-server/quarkus -Pauth-server-quarkus -Dmaven.build.cache.enabled=true"
./mvn_ec2_runner.sh ${REGION} "test ${{ env.SUREFIRE_RETRY }} -Pauth-server-quarkus -Pdb-${{ matrix.db }} $PROPS -Dtest=$TESTS -pl testsuite/integration-arquillian/tests/base"
- name: Run base tests
if: ${{ matrix.db != 'aurora-postgres' }}
run: |
TESTS=`testsuite/integration-arquillian/tests/base/testsuites/suite.sh database`
echo "Tests: $TESTS"
@@ -334,6 +399,27 @@
with:
job-id: store-integration-tests-${{ matrix.db }}
- name: EC2 Maven Logs
if: failure()
uses: actions/upload-artifact@v3
with:
name: store-it-mvn-logs
path: .github/scripts/ansible/files
- name: Delete Aurora EC2 Instance
if: ${{ always() && matrix.db == 'aurora-postgres' }}
working-directory: .github/scripts/ansible
run: |
export CLUSTER_NAME=${{ steps.aurora-tests.outputs.ec2_cluster }}
./aws_ec2.sh delete ${{ steps.aurora-init.outputs.region }}
- name: Delete Aurora DB
if: ${{ always() && matrix.db == 'aurora-postgres' }}
uses: ./.github/actions/aurora-delete-database
with:
name: ${{ steps.aurora-init.outputs.name }}
region: ${{ steps.aurora-init.outputs.region }}
store-model-tests:
name: Store Model Tests
runs-on: ubuntu-latest
@@ -682,7 +768,7 @@
GH_TOKEN: ${{ github.token }}
with:
job-name: Migration Tests
- name: Surefire reports
if: always()
uses: ./.github/actions/archive-surefire-reports

@@ -1006,3 +1006,25 @@ The log should contain `KeycloakFipsSecurityProvider` mentioning "Approved mode"
```
KC(BCFIPS version 1.000203 Approved Mode, FIPS-JVM: enabled) version 1.0 - class org.keycloak.crypto.fips.KeycloakFipsSecurityProvider,
```
## Aurora DB Tests
To run the Aurora DB tests on a local machine, do the following:
1. Provision an Aurora DB (an optional connectivity check is shown after these steps):
```bash
AURORA_CLUSTER="example-cluster"
AURORA_REGION=eu-west-1
AURORA_PASSWORD=TODO
source ./.github/scripts/aws/rds/aurora_create.sh
```
2. Execute the store integration tests:
```bash
TESTS=`testsuite/integration-arquillian/tests/base/testsuites/suite.sh database`
mvn test -Pauth-server-quarkus -Pdb-aurora-postgres -Dtest=$TESTS -Dauth.server.db.host=$AURORA_ENDPOINT -Dkeycloak.connectionsJpa.password=$AURORA_PASSWORD -pl testsuite/integration-arquillian/tests/base
```
3. Tear down the Aurora DB instance:
```bash
./.github/scripts/aws/rds/aurora_delete.sh
```
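Optionally, between steps 1 and 2 you can verify connectivity to the new endpoint before running the tests. A minimal check, assuming a local `psql` client is installed (the database name and master username both default to `keycloak`):
```bash
PGPASSWORD=${AURORA_PASSWORD} psql "host=${AURORA_ENDPOINT} dbname=keycloak user=keycloak sslmode=require" -c 'SELECT 1'
```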

@@ -502,6 +502,24 @@
<docker.database.wait-for-log-regex>(?si)Ready for start up.*ready [^\n]{0,30}connections</docker.database.wait-for-log-regex>
</properties>
</profile>
<profile>
<id>db-aurora-postgres</id>
<properties>
<keycloak.storage.connections.vendor>postgres</keycloak.storage.connections.vendor>
<keycloak.connectionsJpa.driver>org.postgresql.Driver</keycloak.connectionsJpa.driver>
<keycloak.connectionsJpa.database>keycloak</keycloak.connectionsJpa.database>
<keycloak.connectionsJpa.user>keycloak</keycloak.connectionsJpa.user>
<keycloak.connectionsJpa.password>secret99</keycloak.connectionsJpa.password>
<keycloak.connectionsJpa.jdbcParameters/>
<keycloak.connectionsJpa.url>jdbc:postgresql://${auth.server.db.host}/${keycloak.connectionsJpa.database}${keycloak.connectionsJpa.jdbcParameters}</keycloak.connectionsJpa.url>
<!-- JDBC properties point to "default" JDBC driver for the particular DB -->
<!-- For EAP testing, it is recommended to override those with system properties pointing to GAV of more appropriate JDBC driver -->
<!-- for the particular EAP version -->
<jdbc.mvn.groupId>org.postgresql</jdbc.mvn.groupId>
<jdbc.mvn.artifactId>postgresql</jdbc.mvn.artifactId>
<jdbc.mvn.version>${postgresql-jdbc.version}</jdbc.mvn.version>
</properties>
</profile>
<profile>
<id>db-allocator-db-postgres</id>
<properties>