Move the scripts used by Travis jobs into the travis folder, because all scripts folders are excluded when mirroring

This commit is contained in:
cagache
2021-04-01 18:44:35 +03:00
parent 3f25704425
commit e7d25af3b4
19 changed files with 44 additions and 44 deletions

View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Check Docker Hub for an already-published image tag matching ${RELEASE_VERSION}.
# Usage: <script> <alfresco_docker_image>  (e.g. alfresco/alfresco-governance-repository-community)
# Env:   RELEASE_VERSION - the tag we intend to publish.
# NOTE(review): on a match this prints a message and exits 0; presumably the
# caller inspects the output to interrupt the release -- confirm.
alfresco_docker_image=$1
# Verify release tags: scrape the tag list from the Docker Hub tags page.
# NOTE(review): greps the raw page payload; fragile if Docker Hub changes it.
get_tags="$(curl "https://hub.docker.com/r/${alfresco_docker_image}/tags/" | grep -o '\"result\".*\"]')"
# Intentionally unquoted: word-splitting turns the scraped text into an array.
arrayTags=($get_tags)
echo "Existing Tags: $get_tags"
for tag in "${arrayTags[@]}"
do
    if [[ "$tag" == "${RELEASE_VERSION}" ]]; then
        echo "Tag ${RELEASE_VERSION} already pushed, release process will interrupt."
        exit 0
    fi
done
echo "The ${RELEASE_VERSION} tag was not found"

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Remove Docker images matching a pattern from the build agent; if the targeted
# removal fails, fall back to wiping every image on the machine.
# Env: imagesToBeDeleted - grep pattern selecting image repository names.
set -x
echo "$imagesToBeDeleted"
echo "List all images:"
docker images -a
# Collect the unique image IDs whose listing row matches the pattern.
docker_images_list=$(docker images | grep "$imagesToBeDeleted" | awk '{print $3}' | uniq)
if [ "$docker_images_list" == "" ]; then
    echo "No docker images on the agent"
else
    echo "Clearing images: $docker_images_list"
    # Intentionally unquoted: each image ID must be a separate argument.
    if docker rmi -f $docker_images_list ; then
        echo "Deleting images was successful."
    else
        echo "Deleting specified images failed, so falling back to delete ALL images on system."
        docker rmi -f $(docker images -aq)
    fi
fi

21
travis/scripts/cleanup.sh Normal file
View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Tear down all containers started by the build and delete the per-build S3
# buckets (both prefixes).
# Env: CREATE_BUCKET_AWS_ACCESS_KEY / CREATE_BUCKET_AWS_SECRET_KEY - AWS credentials
#      S3_BUCKET_NAME / S3_BUCKET2_NAME - bucket name prefixes to remove
echo "=========================== Starting Cleanup Script ==========================="
PS4="\[\e[35m\]+ \[\e[m\]"
set -vx
pushd "$(dirname "${BASH_SOURCE[0]}")/../"
# Stop and remove the containers
docker ps -a -q | xargs -l -r docker stop
docker ps -a -q | xargs -l -r docker rm
pip install awscli
# Feed the credentials to `aws configure` as printf ARGUMENTS, not as the
# format string (a '%' inside a secret would corrupt it), and suspend tracing
# so the keys never land in the build log.
{ set +vx; } 2>/dev/null
printf '%s\n%s\n\n\n' "${CREATE_BUCKET_AWS_ACCESS_KEY}" "${CREATE_BUCKET_AWS_SECRET_KEY}" | aws configure
set -vx
# Force-remove every bucket whose name starts with the configured prefixes.
aws s3 ls | awk '{print $3}' | grep "^${S3_BUCKET_NAME}" | xargs -l -r -I{} aws s3 rb "s3://{}" --force
aws s3 ls | awk '{print $3}' | grep "^${S3_BUCKET2_NAME}" | xargs -l -r -I{} aws s3 rb "s3://{}" --force
popd
set +vx
echo "=========================== Finishing Cleanup Script =========================="

View File

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Run the Maven release (clean + prepare + perform) for the current Travis branch.
# Env: TRAVIS_BRANCH, GIT_EMAIL, GIT_USERNAME, GIT_PASSWORD,
#      RELEASE_VERSION, DEVELOPMENT_VERSION (both mandatory).
set -e
# Use full history for release
git checkout -B "${TRAVIS_BRANCH}"
git config user.email "${GIT_EMAIL}"
# Quote the operands: with unquoted empty vars `[ -z ]` only works by accident,
# and values containing spaces would break the test outright.
if [ -z "${RELEASE_VERSION}" ] || [ -z "${DEVELOPMENT_VERSION}" ]; then
    echo "Please provide a Release and Development version"
    exit 1
fi
mvn -B \
    -Dusername="${GIT_USERNAME}" \
    -Dpassword="${GIT_PASSWORD}" \
    -DreleaseVersion="${RELEASE_VERSION}" \
    -DdevelopmentVersion="${DEVELOPMENT_VERSION}" \
    -DscmCommentPrefix="[maven-release-plugin][skip ci] " \
    -DuseReleaseProfile=false \
    "-Darguments=-DskipTests -P\!enterprise -Prelease-community,community-release" \
    release:clean release:prepare release:perform

View File

@@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Create the WORM (object-lock) S3 bucket used by the tests, if it does not
# already exist, and tag it for automated cleanup.
# Env: CREATE_BUCKET_AWS_ACCESS_KEY / CREATE_BUCKET_AWS_SECRET_KEY - AWS credentials
#      S3_BUCKET2_NAME - exact bucket name; S3_BUCKET_REGION - bucket region.
echo "=========================== Create Worm Bucket ==========================="
PS4="\[\e[35m\]+ \[\e[m\]"
set -vex
pushd "$(dirname "${BASH_SOURCE[0]}")/../"
pip install awscli
# Credentials go in as printf ARGUMENTS (not the format string) with tracing
# suspended, so they are neither mangled by a '%' nor leaked into the build log.
{ set +vx; } 2>/dev/null
printf '%s\n%s\n\n\n' "${CREATE_BUCKET_AWS_ACCESS_KEY}" "${CREATE_BUCKET_AWS_SECRET_KEY}" | aws configure
set -vx
# Nothing to do when the bucket already exists (exact-name match).
if aws s3 ls | awk '{print $3}' | grep -q "^${S3_BUCKET2_NAME}$" ; then
    echo "Bucket ${S3_BUCKET2_NAME} already exists"
    exit 0
fi
aws s3api create-bucket --bucket "${S3_BUCKET2_NAME}" --region "${S3_BUCKET_REGION}" --object-lock-enabled-for-bucket
# One-day COMPLIANCE retention: objects cannot be deleted for 24h (true WORM).
aws s3api put-object-lock-configuration \
    --bucket "${S3_BUCKET2_NAME}" \
    --object-lock-configuration 'ObjectLockEnabled=Enabled,Rule={DefaultRetention={Mode=COMPLIANCE,Days=1}}'
# Tag for cleanup -- presumably consumed by the cleanup lambda's tag filter.
aws s3api put-bucket-tagging --bucket "${S3_BUCKET2_NAME}" \
    --tagging="TagSet=[{Key=toDeleteAfterTests,Value=true}]"
popd
set +vex
echo "=========================== Finishing Create Worm Bucket Script =========================="

View File

@@ -0,0 +1,105 @@
import json
import boto3
from datetime import datetime, timedelta, timezone
from botocore.exceptions import ClientError
# This python module is intended for use as a Python 3 AWS lambda function
# Tested in python 3.6 environment
# The AWS role used with this lambda function will need AmazonS3FullAccess and CloudWatchLogsFullAccess permissions
# Tested with role lambda_s3_execution_role in engineering account
# Retrieve bucket's tag set
def get_tagset(bucket):
    """Return the bucket's tag set, or [] when it has no tagging configuration.

    boto3 raises ClientError (e.g. NoSuchTagSet) for buckets without tags;
    treat that as "no tags" rather than an error. The exception object itself
    is not needed, so it is not bound.
    """
    try:
        return bucket.Tagging().tag_set
    except ClientError:
        return []
# Check if a bucket should be deleted
def tag_matches(bucket):
    """Return True when the bucket carries the tag toDeleteAfterTests=true."""
    return any(
        tag["Key"] == "toDeleteAfterTests" and tag["Value"] == "true"
        for tag in get_tagset(bucket)
    )
def age_matches(bucket):
    """Return True when the bucket is at least one full day old (UTC)."""
    age = datetime.now(timezone.utc) - bucket.creation_date
    return age.days > 0
def prefix_matches(bucket, prefix):
    """Return True when no prefix filter is given, or the name starts with it."""
    return not prefix or bucket.name.startswith(prefix)
# Get a list of buckets to delete
def get_buckets_to_delete(prefix):
    # Uses the lambda's execution-role credentials to list ALL buckets in the
    # account, then narrows the list down.
    s3 = boto3.resource('s3')
    # Get all buckets matching bucket name prefix
    prefixed_buckets = [bucket for bucket in s3.buckets.all() if prefix_matches(bucket, prefix)]
    # Filter buckets on tag
    # (tag filtering is currently disabled -- only prefix and age are applied)
    # tagged_buckets = [bucket for bucket in prefixed_buckets if tag_matches(bucket)]
    # Filter buckets on age
    old_buckets = [bucket for bucket in prefixed_buckets if age_matches(bucket)]
    return old_buckets
# Delete bucket
def delete_bucket(bucket):
    """Empty a bucket (objects and all object versions) and then delete it.

    Each step is attempted independently: a ClientError is logged and the
    remaining steps still run, so a partially-deletable bucket gets as far
    as possible.
    """
    # NOTE(review): the original also had `[object.delete for object in
    # bucket.objects.all()]`, which never invoked delete() (missing call
    # parentheses) and shadowed the builtin `object`; it was a no-op and is
    # removed in favour of the bulk delete below.
    try:
        bucket.objects.all().delete()
    except ClientError as e:
        print("Failed to delete objects in bucket: " + bucket.name)
        print(e)
    # Versioned buckets: remove each version individually first...
    try:
        for version in bucket.object_versions.all():
            version.delete()
    except ClientError as e:
        print("Failed to delete object_versions in bucket: " + bucket.name)
        print(e)
    # ...then bulk-delete whatever is left.
    try:
        bucket.object_versions.delete()
    except ClientError as e:
        print("Failed to delete object_versions in bucket: " + bucket.name)
        print(e)
    try:
        bucket.delete()
        print("Bucket " + bucket.name + " was deleted")
    except ClientError as e:
        print("Failed to delete bucket: " + bucket.name)
        print(e)
# Non-empty buckets are deleted (recursively); failed attempts will be logged.
# The buckets are filtered on the name prefix: "travis-ags-"
def lambda_handler(event, context):
    # Retrieve bucket name prefix option
    # (event/context are required by the lambda signature but unused; the
    # prefix is hard-coded.)
    prefix = "travis-ags-"
    # Get a list of buckets to delete
    buckets_to_delete = get_buckets_to_delete(prefix)
    # Delete buckets
    print ("Deleting buckets:")
    for bucket in buckets_to_delete :
        print (bucket.name)
        delete_bucket(bucket)
    # Standard lambda proxy-style response.
    return {
        'statusCode': 200,
        'body': json.dumps('Done!')
    }
# Uncomment for a local smoke run:
#lambda_handler(None, None)

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Stop containers the remaining test steps do not need, to free CPU/RAM on the
# Travis agent, then print a one-shot snapshot of container resource usage.
set -x
#stop not needed containers
docker stop $(docker ps -a | grep '_zeppelin_' | awk '{print $1}')
docker stop $(docker ps -a | grep '_sync-service_' | awk '{print $1}')
shareContainerId=$(docker ps -a | grep '_share_' | awk '{print $1}')
# NOTE(review): a running share container is used here as a marker that the
# transform-router / shared-file-store services are not needed -- confirm this
# assumption against the compose files before reusing the script.
if [ -n "$shareContainerId" ]; then
docker stop $(docker ps -a | grep '_transform-router_' | awk '{print $1}')
docker stop $(docker ps -a | grep '_shared-file-store_' | awk '{print $1}')
fi
# Display containers resources usage
docker stats --no-stream

17
travis/scripts/getLogs.sh Normal file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
# Dump the logs of the core containers (alfresco, share, solr) into files so
# the CI job can archive them as build artifacts.
set -x
# Display running containers
docker ps
# Print the id column of the container whose name matches the given pattern.
find_container() {
  docker ps -a | grep "$1" | awk '{print $1}'
}
alfrescoContainerId=$(find_container '_alfresco_')
shareContainerId=$(find_container '_share_')
solrContainerId=$(find_container '_search_')
# The alfresco log is collected unconditionally; share/solr only when those
# containers exist in this stack.
docker logs $alfrescoContainerId > alfresco.log
if [ -n "$shareContainerId" ]; then
  docker logs $shareContainerId > share.log
fi
if [ -n "$solrContainerId" ]; then
  docker logs $solrContainerId > solr.log
fi

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Tag one or more already-published images with "<tag>-<digest>" and push the
# result to one or more registries, so a given build can be referenced
# immutably by its content digest.
set -x
scriptName=$(basename "$0")
usage="Usage: $scriptName [options]
-h , --help show this help text
-i <sourceImage> a source image to use
(e.g. quay.io/alfresco/ags-share-community)
-r <repository> a repository to push new tags to
(e.g. registry.hub.docker.com)
-t <tag> the existing tag for the images (mandatory)
-d <digestLength> the length of digest to output (default 12 chars)"
digestLength=12
while getopts ':hi:r:t:d:' option; do
    case "$option" in
        h) echo -e "Tag one or more images to include the digest and push this to some repositories.\n\n${usage}"
           exit
           ;;
        i) sourceImages+=("$OPTARG")
           ;;
        r) repositories+=("$OPTARG")
           ;;
        t) existingTag=$OPTARG
           ;;
        d) digestLength=$OPTARG
           ;;
        :) echo -e "Missing argument for -${OPTARG}\n\n${usage}" >&2
           exit 1
           ;;
        \?) echo -e "Illegal option: -${OPTARG}\n\n${usage}" >&2
           exit 1
           ;;
    esac
done
shift $((OPTIND - 1))
# -t is mandatory; -z replaces the original's '#'-prefix comparison idiom.
if [ -z "$existingTag" ]; then
    echo -e "Please supply a tag with the -t option.\n\n${usage}" >&2
    exit 1
fi
for sourceImage in "${sourceImages[@]}"
do
    echo "Processing $sourceImage"
    # Note that this command should work even if the image is already present locally.
    digest=$(docker pull "${sourceImage}:${existingTag}" | grep "Digest:" | awk -F':' '{print $3}' | cut -c 1-"$digestLength")
    if [ "${#digest}" != "$digestLength" ]
    then
        echo "Unexpected length for digest of ${sourceImage}: '${digest}'" >&2
        exit 1
    fi
    newTag=${existingTag}-${digest}
    # Remove the source repository name if it contains one.
    slashes=$(echo "$sourceImage" | sed "s|[^/]||g")
    if [ "${#slashes}" == 2 ]
    then
        # The repository name is everything up to the first slash.
        image=$(echo "$sourceImage" | sed "s|[^/]*/||")
    else
        # Assume the source image doesn't reference the repository name.
        image=$sourceImage
    fi
    for repository in "${repositories[@]}"
    do
        docker tag "${sourceImage}:${existingTag}" "${repository}/${image}:${newTag}"
        docker push "${repository}/${image}:${newTag}"
        echo "Pushed ${sourceImage}:${existingTag} to ${repository}/${image}:${newTag}"
    done
done

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Extract RELEASE_VERSION and DEVELOPMENT_VERSION from a Travis commit message
# of the form "[community release 3.3.1 3.4.0-SNAPSHOT]".
# The exports only reach the caller if this script is sourced -- confirm usage.
set -e
echo "Travis commit message: $TRAVIS_COMMIT_MESSAGE"
# `|| true`: under `set -e` a non-matching grep would abort the script right
# here, making the friendly error message below unreachable dead code.
release_message=$(echo "$TRAVIS_COMMIT_MESSAGE" | grep -Po '(\[(community|enterprise)\srelease\s((\d)+\.)+((\d)+|[a-z])(-[A-Z](\d)+){0,1}\s((\d)+\.)+(\d)+-SNAPSHOT\])' || true)
if [ -z "$release_message" ]; then
    echo "The commit message is in the wrong format or it does not contain all the required properties."
    exit 1
fi
# First version token is the release; the -SNAPSHOT token is the next
# development version. Assignment and export are split so a failing command
# substitution is not masked by `export`'s own exit status.
RELEASE_VERSION=$(echo "$release_message" | grep -Po '((\d)+\.)+((\d)+|[a-z])(-[A-Z](\d)+){0,1}' | head -1)
DEVELOPMENT_VERSION=$(echo "$release_message" | grep -Po '((\d)+\.)+(\d)+-SNAPSHOT')
export RELEASE_VERSION DEVELOPMENT_VERSION
echo "Release version is set to $RELEASE_VERSION"
echo "Development version is set to $DEVELOPMENT_VERSION"

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Run the SourceClear (srcclr) scan and surface the report link / failure lines.
# fail script immediately on any errors in external commands and print the lines
set -ev
SUCCESS=0
# Capture the scan's exit code with `|| SUCCESS=$?`: in the original,
# `SUCCESS=$?` after the command was dead code -- under `set -e` a failing mvn
# exited the script before the capture, so SUCCESS was always 0.
mvn -B -q clean install \
    -DskipTests \
    -Dmaven.javadoc.skip=true \
    -pl '!rm-automation,!rm-automation/rm-automation-community-rest-api,!rm-automation/rm-automation-enterprise-rest-api,!rm-automation/rm-automation-ui,!rm-benchmark' \
    com.srcclr:srcclr-maven-plugin:scan \
    -Dcom.srcclr.apiToken="$SRCCLR_API_TOKEN" > scan.log || SUCCESS=$?
# Show the interesting lines; `|| true` so an absent match does not trip -e
# before we can propagate the scan's own exit code.
grep -e 'Full Report Details' -e 'Failed' scan.log || true
exit ${SUCCESS}

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Start the AGS docker-compose stack from the given compose file.
# $1: path to docker-compose.yml (mandatory).
export DOCKER_COMPOSE_PATH="${1}"
if [ -z "$DOCKER_COMPOSE_PATH" ] ; then
    echo "Please provide path to docker-compose.yml: \"${0##*/} /path/to/docker-compose.yml\""
    exit 1
fi
echo "Starting AGS stack in ${DOCKER_COMPOSE_PATH}"
# .env files are picked up from project directory correctly on docker-compose 1.23.0+
# The dirname result is quoted so paths containing spaces survive; testing the
# command directly replaces the fragile `[ $? -eq 0 ]` pattern.
if docker-compose --file "${DOCKER_COMPOSE_PATH}" --project-directory "$(dirname "${DOCKER_COMPOSE_PATH}")" up -d ; then
    echo "Docker Compose started ok"
else
    echo "Docker Compose failed to start" >&2
    exit 1
fi

View File

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
# Start a docker-compose stack from the directory given in $1.
# $2: "true"/"false" -- when true, swap in .env.ci (TRANSFORM_SERVICE_ENABLED=false).
# fail script immediately on any errors in external commands and print the lines
set -ev
# Quote $1: an unquoted cd breaks on paths with spaces (and cd to "" succeeds
# silently, masking a missing argument).
cd "$1"
# if 2nd input parameter is true then use .env.ci where TRANSFORM_SERVICE_ENABLED flag is set to false
# in order to not use anymore Transform router and Shared File Store
# NOTE(review): $2 is executed as a command, relying on it being the literal
# `true` or `false` builtin; an EMPTY $2 also takes the branch -- confirm
# callers always pass an explicit value.
if $2 ; then
    mv -u .env.ci .env
fi
docker-compose up -d

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Poll ${ALFRESCO_URL} until Alfresco answers a HEAD request or TIMEOUT seconds
# elapse; on timeout, dump diagnostics and fail the build.
set -xe
WAIT_INTERVAL=1
COUNTER=0
TIMEOUT=300
t0=$(date +%s)
echo "Waiting for alfresco to start"
# Run curl directly: the original wrapped it in $(...), which only worked by
# accident (the status of an empty expansion is that of the last substitution).
until curl --output /dev/null --silent --head --fail "${ALFRESCO_URL}" || [ "$COUNTER" -eq "$TIMEOUT" ]; do
    printf '.'
    sleep $WAIT_INTERVAL
    COUNTER=$((COUNTER + WAIT_INTERVAL))
done
if (( COUNTER < TIMEOUT )) ; then
    t1=$(date +%s)
    delta=$(((t1 - t0) / 60))
    echo "Alfresco Started in $delta minutes"
else
    echo "Waited $COUNTER seconds"
    echo "Alfresco Could not start in time."
    echo "All started containers:"
    docker ps -a
    ALFCONTAINER=$(docker ps -a | grep _alfresco_1 | awk '{ print $1 }')
    echo "Last 200 lines from alfresco.log on container $ALFCONTAINER:"
    docker logs --tail=200 ${ALFCONTAINER}
    exit 1
fi

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Fetch the released AGS artifacts (repo amp, share amp, REST API explorer war)
# from the Maven repository and bundle them into one distribution zip.
# $1:  release type -- NOTE(review): inferred to be "community"/"enterprise"
#      from the artifact names; confirm against callers.
# Env: RELEASE_VERSION - version of the artifacts to fetch.
set -e
release_type=$1
mkdir "artifacts_dir"
mvn -B org.apache.maven.plugins:maven-dependency-plugin:3.1.1:copy \
    -Dartifact="org.alfresco:alfresco-governance-services-${release_type}-repo:${RELEASE_VERSION}:amp" \
    -DoutputDirectory=artifacts_dir
mvn -B org.apache.maven.plugins:maven-dependency-plugin:3.1.1:copy \
    -Dartifact="org.alfresco:alfresco-governance-services-${release_type}-share:${RELEASE_VERSION}:amp" \
    -DoutputDirectory=artifacts_dir
mvn -B org.apache.maven.plugins:maven-dependency-plugin:3.1.1:copy \
    -Dartifact="org.alfresco:alfresco-governance-services-${release_type}-rest-api-explorer:${RELEASE_VERSION}:war" \
    -DoutputDirectory=artifacts_dir
cd artifacts_dir
# The bare * is intentional: zip everything that was just downloaded.
zip "alfresco-governance-services-${release_type}-${RELEASE_VERSION}.zip" *
# rm *.amp *.war -f
ls

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Bundle locally built AGS artifacts (repo amp, share amp, REST API explorer
# war) into one distribution zip under artifacts_dir.
# $1:  release type selecting the rm-<type> module tree.
# Env: RELEASE_VERSION - used only in the zip file name.
set -e
release_type=$1
mkdir "artifacts_dir"
# The globs pick up the versioned artifact file names produced by the build.
cp rm-${release_type}/rm-${release_type}-repo/target/alfresco-governance-services-${release_type}-repo-*.amp artifacts_dir
cp rm-${release_type}/rm-${release_type}-share/target/alfresco-governance-services-${release_type}-share-*.amp artifacts_dir
cp rm-${release_type}/rm-${release_type}-rest-api-explorer/target/alfresco-governance-services-${release_type}-rest-api-explorer-*.war artifacts_dir
cd artifacts_dir
# The bare * is intentional: zip everything just copied in.
zip alfresco-governance-services-${release_type}-${RELEASE_VERSION}.zip *
# rm *.amp *.war -f
ls