mirror of https://github.com/Alfresco/alfresco-community-repo.git
synced 2025-07-31 17:39:05 +00:00

Merge pull request #1205 from Alfresco/feature/APPS-261--run-worm-automated-tests

APPS-298 Run WORM automated tests
71 .travis.yml
@@ -1,14 +1,22 @@
 ---
 import:
   - source: travis-env-vars.yml
 os: linux
 dist: xenial
 sudo: required
 language: java
-jdk:
-  - openjdk11
+jdk: openjdk11
 
 addons:
   firefox: "43.0.1"
 
 services:
   - xvfb
   - docker
 
 git:
   depth: false
   quiet: true
 
 branches:
   only:
     - master
@@ -18,14 +26,16 @@ branches:
 
 cache:
   directories:
-    - $HOME/.m2
+    - ${HOME}/.m2/repository
 
 # the cache can grow constantly
-before_cache:
-  - rm -rf $HOME/.m2/repository/org/alfresco/alfresco-governance-services*
+before_cache: rm -rf ${HOME}/.m2/repository/org/alfresco/alfresco-governance-services*
 
 before_install:
-  - "cp .travis.settings.xml $HOME/.m2/settings.xml"
+  - mkdir -p "${HOME}/.m2" && cp -f .travis.settings.xml "${HOME}/.m2/settings.xml"
+  - find "${HOME}/.m2/repository/" -type d -name "*-SNAPSHOT*" | xargs -r -l rm -rf
+  - docker login quay.io -u ${QUAY_USERNAME} -p ${QUAY_PASSWORD}
+  - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
 install: skip
 
 stages:
@@ -46,18 +56,18 @@ jobs:
 
   - name: "Build AGS Enterprise"
     stage: Build AGS
-    before_script: source scripts/setUpMavenPhase.sh
     install:
       - travis_retry travis_wait 30 mvn -B deploy -N
       - travis_retry travis_wait 60 mvn -B -q clean install $MVN_SKIP -f rm-community/pom.xml -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
+    before_script: source scripts/setUpMavenPhase.sh
     script:
       - travis_retry travis_wait 80 mvn -B -q ${MAVEN_PHASE} -P${BUILD_PROFILE} -Dimage.tag=${IMAGE_TAG} -Dskip.integrationtests=false -f rm-enterprise/pom.xml -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
 
   - name: "Build AGS Benchmark"
     stage: Build AGS
-    before_script: source scripts/setUpMavenPhase.sh
     install:
       - travis_retry travis_wait 80 mvn -B -q clean install $MVN_SKIP -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
+    before_script: source scripts/setUpMavenPhase.sh
     script:
       - travis_retry travis_wait 35 mvn -B -q ${MAVEN_PHASE} -Dskip.integrationtests=false -f rm-benchmark/pom.xml -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
 
@@ -71,41 +81,68 @@ jobs:
       - echo "Enterprise Integrations Tests on MySQL"
   - name: "Community Rest API Tests"
     stage: Tests
-    before_install:
-      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-community-repo -am
+    install:
+      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-community-repo -am
     before_script:
       - bash scripts/startAlfresco.sh $COMMUNITY_REPO_PATH
       - bash scripts/waitForAlfrescoToStart.sh
     script:
      - echo "Community Rest API Tests"
   - name: "Enterprise Rest API Tests"
     stage: Tests
-    before_install:
-      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-enterprise-repo -am
+    install:
+      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-enterprise-repo -am
     before_script:
       - bash scripts/startAlfresco.sh $ENTERPRISE_REPO_PATH
       - bash scripts/waitForAlfrescoToStart.sh
     script:
       - echo "Enterprise Rest API Tests"
 
+  - name: "Enterprise Rest API WORM Tests"
+    stage: Tests
+    install:
+      - travis_retry travis_wait 90 mvn -B -U -q clean install ${MVN_SKIP} -PbuildDockerImage -pl :alfresco-governance-services-enterprise-repo,:alfresco-governance-services-enterprise-share -am
+      - travis_retry travis_wait 30 mvn -B -U -q clean install ${MVN_SKIP} -pl :alfresco-governance-services-automation-enterprise-rest-api -am
+    before_script:
+      - bash scripts/create-worm-bucket.sh
+      - bash scripts/start-compose.sh "${ENTERPRISE_SHARE_PATH}/docker-compose-worm-support-rest.yml"
+      - bash scripts/waitForAlfrescoToStart.sh
+    script: mvn -B test -pl :alfresco-governance-services-automation-enterprise-rest-api -DsuiteXmlFile=wormTestSuite.xml -Dskip.automationtests=false
+    after_script: bash scripts/cleanup.sh
+    after_failure: docker ps -a | grep '_alfresco_1' | awk '{print $1}' | xargs docker logs | tail -5000
 
   - name: "Community UI Tests ..."
     stage: Tests
-    before_install:
-      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-community-repo,:alfresco-governance-services-community-share -am
+    install:
+      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-community-repo,:alfresco-governance-services-community-share -am
     before_script:
       - bash scripts/startAlfresco.sh $COMMUNITY_SHARE_PATH
       - bash scripts/waitForAlfrescoToStart.sh
     script:
       - echo "Community UI Tests ..."
   - name: "Enterprise UI Tests ..."
     stage: Tests
-    before_install:
-      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-enterprise-repo,:alfresco-governance-services-enterprise-share -am
+    install:
+      - travis_retry travis_wait 90 mvn -B -q install $MVN_SKIP -PbuildDockerImage -pl :alfresco-governance-services-enterprise-repo,:alfresco-governance-services-enterprise-share -am
     before_script:
       - bash scripts/startAlfresco.sh $ENTERPRISE_SHARE_PATH
       - bash scripts/waitForAlfrescoToStart.sh
     script:
       - echo "Enterprise UI Tests ..."
 
+  - name: "Enterprise UI WORM Tests"
+    stage: Tests
+    install:
+      - travis_retry travis_wait 90 mvn -B -U -q clean install ${MVN_SKIP} -PbuildDockerImage -pl :alfresco-governance-services-enterprise-repo,:alfresco-governance-services-enterprise-share -am
+      - travis_retry travis_wait 30 mvn -B -U -q clean install ${MVN_SKIP} -pl :alfresco-governance-services-automation-ui -am
+    before_script:
+      - bash scripts/create-worm-bucket.sh
+      - bash scripts/start-compose.sh "${ENTERPRISE_SHARE_PATH}/docker-compose-worm-support-ui.yml"
+      - bash scripts/waitForAlfrescoToStart.sh
+    script: mvn -B test -pl :alfresco-governance-services-automation-ui -DsuiteXmlFile=wormTestSuite.xml -Dskip.automationtests=false -Dshare.url=${SHARE_URL} -Dalfresco.url=${ALFRESCO_URL} -Dwebdriver.local.grid=true -Dwebdriver.browser=RemoteFireFox -Dwebdriver.localGrid=false -Dwebdriver.element.wait.time=20000 -Dwebdriver.page.render.wait.time=60000
+    after_script: bash scripts/cleanup.sh
+    after_failure: docker ps -a | grep '_alfresco_1' | awk '{print $1}' | xargs docker logs | tail -5000
 
   - name: "Source Clear Scan (SCA)"
     stage: Security Scans
     script:
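The two new WORM jobs wire four of the new scripts together. A minimal sketch of the same sequence run locally, assuming the variables from travis-env-vars.yml are exported in your shell:

    # sketch: reproduce the "Enterprise Rest API WORM Tests" job by hand
    export ENTERPRISE_SHARE_PATH=rm-enterprise/rm-enterprise-share
    bash scripts/create-worm-bucket.sh            # create the object-locked S3 bucket
    bash scripts/start-compose.sh "${ENTERPRISE_SHARE_PATH}/docker-compose-worm-support-rest.yml"
    bash scripts/waitForAlfrescoToStart.sh        # poll until ${ALFRESCO_URL} responds
    mvn -B test -pl :alfresco-governance-services-automation-enterprise-rest-api \
        -DsuiteXmlFile=wormTestSuite.xml -Dskip.automationtests=false
    bash scripts/cleanup.sh                       # stop containers, remove the test buckets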
2 pom.xml
@@ -983,7 +983,7 @@
       </plugin>
       <plugin>
         <artifactId>maven-failsafe-plugin</artifactId>
-        <version>2.22.1</version>
+        <version>3.0.0-M5</version>
         <configuration>
           <argLine>
             --illegal-access=permit
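The failsafe bump only changes the plugin version; the invocation is unchanged. A hedged example of how the integration tests get switched on (the -Dskip.integrationtests=false property appears in the Travis jobs above; the exact profile wiring inside pom.xml is not shown in this diff):

    mvn -B verify -Dskip.integrationtests=false -f rm-enterprise/pom.xml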
21 scripts/cleanup.sh (new file)
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+echo "=========================== Starting Cleanup Script ==========================="
+PS4="\[\e[35m\]+ \[\e[m\]"
+set -vx
+pushd "$(dirname "${BASH_SOURCE[0]}")/../"
+
+
+# Stop and remove the containers
+docker ps -a -q | xargs -l -r docker stop
+docker ps -a -q | xargs -l -r docker rm
+
+pip install awscli
+printf "${CREATE_BUCKET_AWS_ACCESS_KEY}\n${CREATE_BUCKET_AWS_SECRET_KEY}\n\n\n" | aws configure
+
+aws s3 ls | awk '{print $3}' | grep "^${S3_BUCKET_NAME}" | xargs -l -r -I{} aws s3 rb "s3://{}" --force
+aws s3 ls | awk '{print $3}' | grep "^${S3_BUCKET2_NAME}" | xargs -l -r -I{} aws s3 rb "s3://{}" --force
+
+popd
+set +vx
+echo "=========================== Finishing Cleanup Script =========================="
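A cautious way to preview what the script would remove is to run the same ls/awk/grep pipeline without the destructive "rb --force" step:

    # sketch: list the buckets cleanup.sh would delete, without deleting them
    aws s3 ls | awk '{print $3}' | grep "^${S3_BUCKET_NAME}"
    aws s3 ls | awk '{print $3}' | grep "^${S3_BUCKET2_NAME}"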
26 scripts/create-worm-bucket.sh (new file)
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+echo "=========================== Create Worm Bucket ==========================="
+PS4="\[\e[35m\]+ \[\e[m\]"
+set -vex
+pushd "$(dirname "${BASH_SOURCE[0]}")/../"
+
+pip install awscli
+printf "${CREATE_BUCKET_AWS_ACCESS_KEY}\n${CREATE_BUCKET_AWS_SECRET_KEY}\n\n\n" | aws configure
+
+if aws s3 ls | awk '{print $3}' | grep -q "^${S3_BUCKET2_NAME}$" ; then
+    echo "Bucket ${S3_BUCKET2_NAME} already exists"
+    exit 0
+fi
+
+aws s3api create-bucket --bucket "${S3_BUCKET2_NAME}" --region ${S3_BUCKET_REGION} --object-lock-enabled-for-bucket
+aws s3api put-object-lock-configuration \
+    --bucket "${S3_BUCKET2_NAME}" \
+    --object-lock-configuration 'ObjectLockEnabled=Enabled,Rule={DefaultRetention={Mode=COMPLIANCE,Days=1}}'
+
+aws s3api put-bucket-tagging --bucket "${S3_BUCKET2_NAME}" \
+    --tagging="TagSet=[{Key=toDeleteAfterTests,Value=true}]"
+
+popd
+set +vex
+echo "=========================== Finishing Create Worm Bucket Script =========================="
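To confirm the WORM lock actually took effect, the configuration can be read back with the same credentials configured above:

    aws s3api get-object-lock-configuration --bucket "${S3_BUCKET2_NAME}"
    # expected: ObjectLockEnabled=Enabled with a COMPLIANCE default retention of 1 day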
105 scripts/delete-test-buckets-lambda.py (new file)
@@ -0,0 +1,105 @@
+import json
+import boto3
+from datetime import datetime, timedelta, timezone
+from botocore.exceptions import ClientError
+
+
+# This python module is intended for use as a Python 3 AWS lambda function
+# Tested in python 3.6 environment
+# The AWS role used with this lambda function will need AmazonS3FullAccess and CloudWatchLogsFullAccess permissions
+# Tested with role lambda_s3_execution_role in engineering account
+
+# Retrieve bucket's tag set
+def get_tagset(bucket):
+    try:
+        return bucket.Tagging().tag_set
+    except ClientError as e:
+        return []
+
+# Check if a bucket should be deleted
+def tag_matches(bucket):
+    for tag in get_tagset(bucket):
+        if tag["Key"] == "toDeleteAfterTests" and tag["Value"] == "true":
+            return True
+    return False
+
+def age_matches(bucket):
+    delta = datetime.now(timezone.utc) - bucket.creation_date
+    return delta.days > 0
+
+def prefix_matches(bucket, prefix):
+    if not prefix:
+        return True
+    if bucket.name.startswith(prefix):
+        return True
+    return False
+
+# Get a list of buckets to delete
+def get_buckets_to_delete(prefix):
+    s3 = boto3.resource('s3')
+
+    # Get all buckets matching bucket name prefix
+    prefixed_buckets = [bucket for bucket in s3.buckets.all() if prefix_matches(bucket, prefix)]
+
+    # Filter buckets on tag
+    # tagged_buckets = [bucket for bucket in prefixed_buckets if tag_matches(bucket)]
+
+    # Filter buckets on age
+    old_buckets = [bucket for bucket in prefixed_buckets if age_matches(bucket)]
+
+    return old_buckets
+
+# Delete bucket
+def delete_bucket(bucket):
+    try:
+        [object.delete() for object in bucket.objects.all()]
+    except ClientError as e:
+        print("Failed to delete objects in bucket: " + bucket.name)
+        print(e)
+    try:
+        bucket.objects.all().delete()
+    except ClientError as e:
+        print("Failed to delete objects in bucket: " + bucket.name)
+        print(e)
+
+    try:
+        [version.delete() for version in bucket.object_versions.all()]
+    except ClientError as e:
+        print("Failed to delete object_versions in bucket: " + bucket.name)
+        print(e)
+    try:
+        bucket.object_versions.delete()
+    except ClientError as e:
+        print("Failed to delete object_versions in bucket: " + bucket.name)
+        print(e)
+
+    try:
+        bucket.delete()
+        print("Bucket " + bucket.name + " was deleted")
+    except ClientError as e:
+        print("Failed to delete bucket: " + bucket.name)
+        print(e)
+
+
+# Non-empty buckets are deleted (recursively); failed attempts will be logged.
+# The buckets are filtered on the name prefix: "travis-ags-"
+def lambda_handler(event, context):
+
+    # Retrieve bucket name prefix option
+    prefix = "travis-ags-"
+
+    # Get a list of buckets to delete
+    buckets_to_delete = get_buckets_to_delete(prefix)
+
+    # Delete buckets
+    print("Deleting buckets:")
+    for bucket in buckets_to_delete:
+        print(bucket.name)
+        delete_bucket(bucket)
+
+    return {
+        'statusCode': 200,
+        'body': json.dumps('Done!')
+    }
+
+#lambda_handler(None, None)
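Once deployed, the handler can be exercised from the CLI; the function name below is an assumption, since the module does not pin one:

    aws lambda invoke --function-name delete-test-buckets /tmp/out.json   # function name is hypothetical
    cat /tmp/out.json   # expected payload: {"statusCode": 200, "body": "\"Done!\""}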
scripts/setUpMavenPhase.sh
@@ -2,27 +2,22 @@
 echo "Branch name: ${TRAVIS_BRANCH}"
 echo "Pull request: ${TRAVIS_PULL_REQUEST}"
 echo "Travis job name: ${TRAVIS_JOB_NAME}"
-branchName=${TRAVIS_BRANCH}
-imageTag=${branchName:8}
-echo "Image tag: ${imageTag}"
+echo "Image tag: ${TRAVIS_BRANCH:8}"
 
-if [[ ${TRAVIS_JOB_NAME} == "Build AGS Enterprise" ]];
-then
+if [[ ${TRAVIS_JOB_NAME} == "Build AGS Enterprise" ]] ; then
   export BUILD_PROFILE="internal"
 else
   export BUILD_PROFILE="master"
 fi
 
-if [[ "${TRAVIS_BRANCH}" == "master" && "${TRAVIS_PULL_REQUEST}" == "false" ]];
-then
+if [[ "${TRAVIS_BRANCH}" == "master" && "${TRAVIS_PULL_REQUEST}" == "false" ]] ; then
   export MAVEN_PHASE="deploy"
   export IMAGE_TAG="latest"
-elif [[ ${TRAVIS_BRANCH} = release* && "${TRAVIS_PULL_REQUEST}" == "false" ]];
-then
+elif [[ ${TRAVIS_BRANCH} = release* && "${TRAVIS_PULL_REQUEST}" == "false" ]] ; then
   export MAVEN_PHASE="deploy"
-  export IMAGE_TAG="${imageTag}-latest"
+  export IMAGE_TAG="${TRAVIS_BRANCH:8}-latest"
 else
   export MAVEN_PHASE="verify"
   export BUILD_PROFILE="buildDockerImage"
   export IMAGE_TAG="latest"
 fi
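The ${TRAVIS_BRANCH:8} expansion strips the first eight characters of the branch name ("release/" is exactly eight), so release branches tag images as "<version>-latest":

    TRAVIS_BRANCH="release/23.1.0"        # example value; the elif matches release*
    echo "${TRAVIS_BRANCH:8}-latest"      # prints: 23.1.0-latest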
20 scripts/start-compose.sh (new executable file)
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+export DOCKER_COMPOSE_PATH="${1}"
+
+if [ -z "$DOCKER_COMPOSE_PATH" ] ; then
+    echo "Please provide path to docker-compose.yml: \"${0##*/} /path/to/docker-compose.yml\""
+    exit 1
+fi
+
+echo "Starting AGS stack in ${DOCKER_COMPOSE_PATH}"
+
+# .env files are picked up from project directory correctly on docker-compose 1.23.0+
+docker-compose --file "${DOCKER_COMPOSE_PATH}" --project-directory $(dirname "${DOCKER_COMPOSE_PATH}") up -d
+
+if [ $? -eq 0 ] ; then
+    echo "Docker Compose started ok"
+else
+    echo "Docker Compose failed to start" >&2
+    exit 1
+fi
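Usage matches the WORM jobs in .travis.yml above, e.g.:

    bash scripts/start-compose.sh "${ENTERPRISE_SHARE_PATH}/docker-compose-worm-support-rest.yml"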
scripts/startAlfresco.sh
@@ -3,5 +3,4 @@
 set -ev
 
 cd $1
-docker login quay.io -u ${QUAY_USERNAME} -p ${QUAY_PASSWORD}
 docker-compose up -d
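With the quay.io login moved to before_install in .travis.yml, the script reduces to changing into the given directory and starting the stack:

    bash scripts/startAlfresco.sh $ENTERPRISE_REPO_PATH   # cd "$1" then docker-compose up -d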
scripts/waitForAlfrescoToStart.sh
@@ -3,21 +3,27 @@
 WAIT_INTERVAL=1
 COUNTER=0
 TIMEOUT=300
-t0=`date +%s`
+t0=$(date +%s)
 
 echo "Waiting for alfresco to start"
-until $(curl --output /dev/null --silent --head --fail http://localhost:8080/alfresco) || [ "$COUNTER" -eq "$TIMEOUT" ]; do
+until $(curl --output /dev/null --silent --head --fail ${ALFRESCO_URL}) || [ "$COUNTER" -eq "$TIMEOUT" ]; do
    printf '.'
    sleep $WAIT_INTERVAL
    COUNTER=$(($COUNTER+$WAIT_INTERVAL))
 done
 
 if (("$COUNTER" < "$TIMEOUT")) ; then
-   t1=`date +%s`
+   t1=$(date +%s)
    delta=$((($t1 - $t0)/60))
    echo "Alfresco Started in $delta minutes"
 else
    echo "Waited $COUNTER seconds"
    echo "Alfresco Could not start in time."
+   echo "All started containers:"
+   docker ps -a
+   ALFCONTAINER=$(docker ps -a | grep _alfresco_1 | awk '{ print $1 }')
+   echo "Last 200 lines from alfresco.log on container $ALFCONTAINER:"
+   docker logs --tail=200 ${ALFCONTAINER}
    exit 1
 fi
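The "until $(curl ...)" form works by a bash quirk: the substitution's output is empty (curl writes to /dev/null), and a command line that expands to nothing exits with the status of the last command substitution performed. A plainer equivalent sketch:

    until curl --output /dev/null --silent --head --fail "${ALFRESCO_URL}"; do
        sleep 1
    done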
travis-env-vars.yml
@@ -8,3 +8,11 @@ env:
   - ENTERPRISE_REPO_PATH=rm-enterprise/rm-enterprise-repo
   - ENTERPRISE_SHARE_PATH=rm-enterprise/rm-enterprise-share
   - MVN_SKIP='-Dmaven.javadoc.skip=true -Dmaven.source.skip=true -DskipTests'
+  - AWS_ACCESS_KEY_ID=${CREATE_BUCKET_AWS_ACCESS_KEY}
+  - AWS_SECRET_ACCESS_KEY=${CREATE_BUCKET_AWS_SECRET_KEY}
+  - S3_BUCKET_REGION="us-east-1"
+  - S3_BUCKET_NAME="travis-ags-${TRAVIS_JOB_NUMBER}"
+  - S3_BUCKET2_NAME="travis-ags-worm-b2"
+  - ALFRESCO_URL="http://localhost:8080/alfresco"
+  - SHARE_URL="http://localhost:8181/share"
+  - DISPLAY=:99.0
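To run the WORM scripts outside Travis, the same variables can be exported by hand (values copied from the file; the CREATE_BUCKET_AWS_* credentials must come from your own environment):

    export S3_BUCKET_REGION="us-east-1"
    export S3_BUCKET2_NAME="travis-ags-worm-b2"
    export ALFRESCO_URL="http://localhost:8080/alfresco"
    export SHARE_URL="http://localhost:8181/share"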