This commit is contained in:
Lucian Suciu
2019-03-26 08:58:27 +02:00
parent 76886b6888
commit 9a74ff0b26
9 changed files with 4 additions and 410 deletions

View File

@@ -1,219 +0,0 @@
include ../Makefile
include configs/cluster.env
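# export every variable name from cluster.env so recipe shells (kops, kubectl, aws) inherit them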
export $(shell sed 's/=.*//' configs/cluster.env)
CHART_NAME := alfresco-incubator/alfresco-content-services
ifeq ($(DEPLOY_ACS_NAME),)
DEPLOY_ACS_NAME:=$(shell cat $(ACTIVE_DEPLOY) | grep DEPLOY_ACS_NAME | cut -d '=' -f2 )
endif
ifeq ($(DEPLOY_INGRESS_NAME),)
DEPLOY_INGRESS_NAME:=$(shell cat $(ACTIVE_DEPLOY) | grep DEPLOY_INGRESS_NAME | cut -d '=' -f2)
endif
cluster-bucket-create: ## 1 - (k8s) creates S3 bucket for KOPS cluster as defined in cluster.env
aws s3 mb $(KOPS_STATE_STORE)
aws s3api put-bucket-versioning --bucket $(S3_BUCKET_NAME) --versioning-configuration Status=Enabled
cluster-setup:
kops create cluster \
--ssh-public-key $(SSH_PUBLIC_KEY_LOCATION) \
--name $(KOPS_CLUSTER_NAME) \
--zones $(AWS_REGION)a \
--cloud aws \
-v 10 \
--kubernetes-version "$(KUBERNETES_VERSION)" \
--node-count 4 \
--master-size m4.xlarge \
--node-size m4.xlarge
cluster-install: cluster-setup ## 2 - (k8s) install the cluster as defined in cluster.env
kops update cluster --name ${KOPS_CLUSTER_NAME} --yes
cluster-validate: ## 3 - (k8s) validate the cluster as defined in cluster.env
kops validate cluster --name ${KOPS_CLUSTER_NAME}
cluster-dashboard-install: ## 4 - (k8s) install k8s dashboard
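# NOTE: "permissive-binding" grants cluster-admin very broadly; acceptable for a throwaway dev cluster only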
kubectl create clusterrolebinding permissive-binding \
--clusterrole=cluster-admin \
--user=admin \
--user=kubelet \
--group=system:serviceaccounts
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
@echo Access the K8S Dashboard at: https://api.$(KOPS_CLUSTER_NAME)/$(K8S_DASHBOAR_URL)
@echo Log in with the admin token obtained by running "make cluster-get-info"
cluster-prepare: ## 5 - (k8s) prepare environment adding helm, tiller, secrets, etc
kubectl create -f configs/tiller-rbac-config.yaml
helm init --service-account tiller
kubectl create -f secrets/quay-registry-secret.yaml --namespace default
# Manual operations
@echo 'Now run the following:'
@echo '$$ kops edit cluster --state $(KOPS_STATE_STORE)'
@echo 'Then apply https://github.com/Alfresco/acs-deployment/blob/master/docs/k8s-pod-security-policies.md'
@echo '$$ kops update cluster --yes --state $(KOPS_STATE_STORE)'
@echo '$$ kops rolling-update cluster --yes --state $(KOPS_STATE_STORE)'
@echo '$$ kubectl get psp'
@echo '$$ kubectl edit psp kube-system'
# EFS Storage
@echo https://github.com/Alfresco/alfresco-dbp-deployment#6-efs-storage-note-only-for-aws
@echo "Don't forget to open inbound traffic in the security group to allow NFS traffic"
cluster-get-info: ## 6 - (k8s) display cluster info/passwords
kubectl cluster-info
@echo Kube secret for admin user:
@kops get secrets kube --type secret -oplaintext
@echo Admin secret for admin user:
@kops get secrets admin --type secret -oplaintext
cluster-master-ssh: ## (k8s) ssh to master node
ssh -o StrictHostKeyChecking=no -i $(SSH_PRIVATE_KEY_LOCATION) admin@api.$(KOPS_CLUSTER_NAME)
cluster-node-ssh: ## (k8s) ssh to a worker node
kubectl get nodes -o wide
@read -p "Enter Node External IP:" externalIP && echo $$externalIP > NODE_IP
ssh -o StrictHostKeyChecking=no -i $(SSH_PRIVATE_KEY_LOCATION) admin@$(shell cat NODE_IP)
cluster-pods-get: ## (k8s) show available pods
kubectl get pods --namespace=$(NAMESPACE)
cluster-pod-ssh: cluster-pods-get ## (k8s) ssh to POD
@read -p "Enter POD NAME:" podname && kubectl exec -it $$podname --namespace=$(NAMESPACE) -- /bin/bash
cluster-delete: ## (k8s) delete the cluster defined in cluster.env
kops delete cluster --state=$(KOPS_STATE_STORE) --yes
cluster-proxy: ## (k8s) enable k8s proxy
@kubectl proxy &
@echo Opening the K8S Dashboard locally via proxy, click Skip on the login page!
open http://localhost:8001/$(K8S_DASHBOAR_URL)
# this creates the ACS namespace
# note: the docker registry secret is copied over from the default namespace
acs-namespace-create: ## 1 - (ACS) create namespace that includes ingress & roles for PSP
$(eval DEPLOY_ACS_NAME:=bau)
$(eval DEPLOY_INGRESS_NAME:=ingress-ass)
$(eval NAMESPACE:=$(NAMESPACE))
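# persist the chosen names so later targets can read them back from $(ACTIVE_DEPLOY)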
$(shell echo DEPLOY_ACS_NAME=$(DEPLOY_ACS_NAME) > $(ACTIVE_DEPLOY))
$(shell echo DEPLOY_INGRESS_NAME=$(DEPLOY_INGRESS_NAME) >> $(ACTIVE_DEPLOY))
$(shell echo NAMESPACE=$(NAMESPACE) >> $(ACTIVE_DEPLOY))
kubectl create namespace $(NAMESPACE)
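# clone the registry pull secret from the default namespace by rewriting its namespace field with jq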
kubectl get secret $(DOCKER_SECRET_NAME) -o json --namespace default | jq '.metadata.namespace = "$(NAMESPACE)"' | kubectl create -f -
# create a role in the namespace which grants PSP usage
# link the "default" service account in the namespace to the role
# this is required to set up the nginx ingress -> "make cluster-prepare" must have been executed once
kubectl -n $(NAMESPACE) create role $(NAMESPACE):psp --verb=use --resource=podsecuritypolicy --resource-name=kube-system
kubectl -n $(NAMESPACE) create rolebinding $(NAMESPACE):psp:default --role=$(NAMESPACE):psp --serviceaccount=$(NAMESPACE):default
kubectl -n $(NAMESPACE) create rolebinding $(NAMESPACE):psp:ass-nginx-ingress --role=$(NAMESPACE):psp --serviceaccount=$(NAMESPACE):ass-nginx-ingress
# define the namespace in ingressvalues.yaml
@sed -i -e "s/namespace:.*/namespace: $(NAMESPACE)/" configs/ingressvalues.yaml
@sed -i -e "s/external-dns.alpha.kubernetes.io\/hostname:.*/external-dns.alpha.kubernetes.io\/hostname: $(NAMESPACE).YourDNSZone/" configs/ingressvalues.yaml
@rm -f configs/ingressvalues.yaml-e
helm install stable/nginx-ingress \
--name $(DEPLOY_INGRESS_NAME) \
--version 0.14.0 \
--set controller.scope.enabled=true \
--set controller.scope.namespace=$(NAMESPACE) \
--set rbac.create=true -f configs/ingressvalues.yaml \
--namespace $(NAMESPACE)
helm ls
acs-ingress-get: ## 2 - (ACS) get Ingress Status / Load Balancer IP address
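# read the ELB hostname assigned to the ingress service (empty until AWS has provisioned it)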
$(eval LOAD_BALANCER_IP := $(shell kubectl --namespace $(NAMESPACE) get services -o jsonpath='{.items[*].status.loadBalancer.ingress[0].hostname}'))
@echo The Ingress Load Balancer is found at: $(LOAD_BALANCER_IP)
acs-helm-repo-add:
helm repo add alfresco-incubator http://kubernetes-charts.alfresco.com/incubator
helm repo add alfresco-stable http://kubernetes-charts.alfresco.com/stable
helm repo update
acs-chart-upload: acs-helm-repo-add ## 3 - (ACS) deploy helm charts
helm install $(CHART_NAME) \
--name $(DEPLOY_ACS_NAME) \
--set externalProtocol="https" \
--set externalHost="$(DEPLOY_ACS_NAME)-$(ROUTE_TAG).$(DOMAIN_NAME)" \
--set externalPort="443" \
--set repository.adminPassword="$(ACS_ADMIN_PASSWORD_MD5)" \
--set postgresql.postgresPassword="admin" \
--set persistence.reclaimPolicy=Recycle \
--set alfresco-infrastructure.persistence.efs.enabled=true \
--set alfresco-infrastructure.persistence.efs.dns="$(EFS_SERVER)" \
--set alfresco-search.common.resources.requests.memory="2500Mi",alfresco-search.common.resources.limits.memory="2500Mi" \
--set alfresco-search.common.environment.SOLR_JAVA_MEM="-Xms2000M -Xmx2000M" \
--set postgresql.persistence.subPath="$(NAMESPACE)/alfresco-content-services/database-data" \
--set persistence.repository.data.subPath="$(NAMESPACE)/alfresco-content-services/repository-data" \
--set alfresco-search.master.persistence.search.data.subPath="$(NAMESPACE)/alfresco-content-services/solr-data" \
--set alfresco-search.slave.persistence.search.data.subPath="$(NAMESPACE)/alfresco-content-services/solr-data-slave" \
--set alfresco-search.common.type="insight-engine" \
--set alfresco-search.common.registryPullSecrets="$(DOCKER_SECRET_NAME)" \
--set alfresco-search.common.ingress.enabled=true \
--set alfresco-search.common.ingress.basicAuth="YWRtaW46JGFwcjEkVEhjSS9NMDUkczVoQk1oVS8vLkJOekRIZXl6cW9HLg==" \
--set alfresco-search.common.ingress.whitelist_ips="0.0.0.0/0" \
--set alfresco-insight-zeppelin.enabled=true \
--set alfresco-search.alfresco-insight-zeppelin.registryPullSecrets="$(DOCKER_SECRET_NAME)" \
--set registryPullSecrets="$(DOCKER_SECRET_NAME)" \
--set alfresco-search.slave.enabled=true \
--set networkpolicysetting.enabled=false \
--namespace=$(NAMESPACE)
acs-route-create: acs-ingress-get ## 4 - (ACS) create Route53 entry
$(eval ROUTE_53_CONFIG_FILE:=configs/route53-entry.json)
$(eval ROUTE_ENTRY_NAME:=$(DEPLOY_ACS_NAME)-$(ROUTE_TAG).$(DOMAIN_NAME))
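# rewrite the Route53 change-batch template in place with the desired action, record name and ELB target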
@sed -i -e "s/\"Action\":.*/\"Action\": \"CREATE\",/" $(ROUTE_53_CONFIG_FILE)
@sed -i -e "s/\"Name\":.*/\"Name\": \"$(ROUTE_ENTRY_NAME)\",/" $(ROUTE_53_CONFIG_FILE)
@sed -i -e "s/\"Value\":.*/\"Value\": \"$(LOAD_BALANCER_IP)\"/" $(ROUTE_53_CONFIG_FILE)
@rm -f configs/*.json-e
aws route53 change-resource-record-sets \
--hosted-zone-id $(HOSTED_ZONE_ID) \
--change-batch file://$(ROUTE_53_CONFIG_FILE)
open http://$(ROUTE_ENTRY_NAME)/share
@echo Route created: $(ROUTE_ENTRY_NAME). Give it a few seconds to warm up, then refresh the page.
acs-route-open: ## 5 - (ACS) open the Route53 entry
$(eval ROUTE_ENTRY_NAME:=$(DEPLOY_ACS_NAME)-$(ROUTE_TAG).$(DOMAIN_NAME))
open http://$(ROUTE_ENTRY_NAME)/share
acs-chart-delete: ## 6 - (ACS) remove the deployment and cleanup namespace
helm delete --purge $(DEPLOY_ACS_NAME)
acs-namespace-delete: ## 7 - (ACS) delete the current namespace
helm delete --purge $(DEPLOY_INGRESS_NAME)
kubectl delete ns $(NAMESPACE)
helm ls
acs-route-delete: acs-ingress-get ## 8 - (ACS) delete Route53 entry
$(eval ROUTE_53_CONFIG_FILE:=configs/route53-entry.json)
$(eval ROUTE_ENTRY_NAME:=$(DEPLOY_ACS_NAME)-$(ROUTE_TAG).$(DOMAIN_NAME))
@sed -i -e "s/\"Action\":.*/\"Action\": \"DELETE\",/" $(ROUTE_53_CONFIG_FILE)
@sed -i -e "s/\"Name\":.*/\"Name\": \"$(ROUTE_ENTRY_NAME)\",/" $(ROUTE_53_CONFIG_FILE)
@sed -i -e "s/\"Value\":.*/\"Value\": \"$(LOAD_BALANCER_IP)\"/" $(ROUTE_53_CONFIG_FILE)
@rm -f configs/*.json-e
aws route53 change-resource-record-sets \
--hosted-zone-id $(HOSTED_ZONE_ID) \
--change-batch file://$(ROUTE_53_CONFIG_FILE)
acs-all-delete: acs-ingress-get acs-chart-delete acs-namespace-delete acs-route-delete ## (ACS) cleanup the namespace and delete the deployed chart(s)
acs-all-install: acs-namespace-create acs-chart-upload ## (ACS) create the namespace and upload the chart
../.cmd/waitUntilPodsAvailable.sh $(NAMESPACE)
# "-" ignores a failing route creation so the install target still succeeds
-$(MAKE) acs-route-create
acs-efs-describe: ## (ACS) describe the EFS file system
aws efs describe-file-systems --file-system-id $(shell echo $(EFS_SERVER) | cut -d'.' -f1 )

View File

@@ -5,7 +5,7 @@ the base readme is kept at the bottom of this readme.
# Prerequisites
* have a kubernetes cluster deployed using the documentation at the bottom (in this case we skip the search ACS deployment for now as it is not chaos ready)
* have a kubernetes cluster deployed
* build a nodejs app for testing purposes (just an app that will display the time)
```shell
@@ -43,105 +43,3 @@ the base readme is kept at the bottom of this readme.
```shell
kubectl create -f chaos.yaml
```
# KOPS CLUSTER SETUP
* how to create a KOPS cluster in AWS
* how to deploy ACS with ASS in your cluster
* all using simple make commands
:exclamation: **_I've collected/compressed in this script the knowledge scattered across:_**
* https://github.com/Alfresco/alfresco-anaxes-shipyard
* https://github.com/Alfresco/acs-deployment
* https://github.com/Alfresco/alfresco-search-deployment
* https://github.com/Alfresco/alfresco-infrastructure-deployment
* kops/kubectl commands
* other utilities
# Prerequisites K8
* [IAM Group setup](https://github.com/kubernetes/kops/blob/master/docs/aws.md#setup-your-environment) with the following policies (a minimal CLI sketch follows this list):
* AmazonEC2FullAccess
* IAMFullAccess
* AmazonS3FullAccess
* AmazonVPCFullAccess
* AmazonElasticFileSystemFullAccess
* AmazonRoute53FullAccess
* a user that is added in this group
* an SSH key generated
```shell
$ ssh-keygen -t rsa -b 4096 -C "anaxes_bastion"
```
* [EFS Storage](https://docs.aws.amazon.com/efs/latest/ug/creating-using-create-fs.html) created with its Security Group updated (edit the Inbound Rules and add a rule to accept NFS over TCP from Anywhere)
* `quay-registry-secret.yaml` created under [./secrets](./secrets) folder, according to [anaxes guidelines](https://github.com/Alfresco/alfresco-anaxes-shipyard/blob/c7d50a124901a2f19b67b31fc49f2c77c729b4ed/SECRETS.md)
* update [.configs/cluster.env](./configs/cluster.env)
* to use the SSH keys above
* to use the EFS_SERVER defined
* to use KOPS_NAME as your username (to be unique)
* to use DOCKER_SECRET_NAME as the secret defined above
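A minimal sketch of the IAM setup referenced above, assuming the AWS CLI is already configured with administrator credentials; the `kops` group and `kops-user` names are illustrative, not mandated by this guide:
```shell
# create the group and attach the six managed policies listed above
aws iam create-group --group-name kops
for policy in AmazonEC2FullAccess IAMFullAccess AmazonS3FullAccess \
    AmazonVPCFullAccess AmazonElasticFileSystemFullAccess AmazonRoute53FullAccess; do
  aws iam attach-group-policy --group-name kops \
    --policy-arn "arn:aws:iam::aws:policy/$policy"
done
# create a user in the group and generate access keys for kops to use
aws iam create-user --user-name kops-user
aws iam add-user-to-group --group-name kops --user-name kops-user
aws iam create-access-key --user-name kops-user
```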
# Usage
1) from this "kops-cluster" folder run `$ make` to see available commands
2) check out [./configs/cluster.env](./configs/cluster.env) first - all commands use these variables.
>:exclamation: if you already have a cluster defined, you can use these scripts: just change the KOPS_STATE_STORE to point to your bucket (see the snippet after this list) and you are all set!
3) you can execute each task one by one in the order displayed (1,2,3...)
![](.docs/intro.gif?raw=true)
4) or, execute the "all" task `$ make acs-all-install` that will do all of this automatically for you
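For the existing-cluster case mentioned in the note at step 2, a minimal sketch of the only edit needed in [./configs/cluster.env](./configs/cluster.env) (the bucket name is a placeholder):
```shell
# reuse an existing KOPS state store instead of creating a new bucket
KOPS_STATE_STORE = s3://your-existing-kops-state-bucket
```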
# Features
## Cluster Related
a) gives you the ability to create a KOPS cluster in AWS step by step, based on [./configs/cluster.env](./configs/cluster.env)
* it will create a S3 bucket for you: `make cluster-bucket-create`
* it will install the KOPS cluster: `make cluster-install`
* it will validate the cluster: `make cluster-validate`
* it will install the K8S dashboard for you: `make cluster-dashboard-install`
* it will install helm and tiller and define the secrets automatically: `make cluster-prepare`
b) gives you the ability to SSH to your cluster MASTER node: `make cluster-master-ssh`
c) gives you the ability to SSH to your cluster WORKER node(s): `make cluster-node-ssh`
* you will be prompted with the list of available nodes
* you will be asked which node to connect to
d) gives you the ability to SSH to a particular POD: `make cluster-pod-ssh`
* you will be prompted with the list of available pods
* you will be asked which POD_NAME to connect to
![](.docs/ssh-to-pod.gif?raw=true)
## ACS Related
>**Hint**: follow the numbered tasks displayed when you run: `make`
> :exclamation: The tasks run against the last namespace created and the last (randomly named) helm chart deployed.
>You can run any task related to ACS individually according to your development needs.
> Example:
> * if the namespace already exists, you may only need to update the chart (`make acs-chart-upload`) and create a new Route53 entry (`make acs-route-create`)
a) gives you the ability to define the NAMESPACE for your ACS deployment
* create the namespace with the ingress, security, etc. automatically: `make acs-namespace-create`
* display the Load Balancer IP when it is ready: `make acs-ingress-get`
b) gives you the ability to deploy a helm chart (using a random name) to the namespace: `make acs-chart-upload`
c) gives you the ability to define a Route53 CNAME entry: `make acs-route-create`
d) cleanup chart (`make acs-chart-delete`), namespace (`make acs-namespace-delete`) or all in one task: `make acs-all-delete`
e) prepare, install and deploy the app all in one task: `make acs-all-install`
![](.docs/all-acs-install.gif?raw=true)

View File

@@ -1,31 +0,0 @@
###########################################################################
# ********************** CLUSTER INFORMATION ******************************
KUBERNETES_VERSION = 1.9.5
KOPS_NAME = lsuciu-ass3
DOMAIN_NAME = dev.alfresco.me
# used in defining the route53: acs-<random>-ROUTE_TAG.DOMAIN_NAME
ROUTE_TAG = lsuciu
# go to https://console.aws.amazon.com/route53/home?region=eu-west-1#hosted-zones to find out the HOSTED_ZONE_ID
# currently using hosted zone defined in DOMAIN_NAME variable
HOSTED_ZONE_ID = Z15IEG419TWNPC
KOPS_CLUSTER_NAME = ${KOPS_NAME}.${DOMAIN_NAME}
S3_BUCKET_NAME = ${KOPS_NAME}-search-kops
KOPS_STATE_STORE = s3://${S3_BUCKET_NAME}
# the cluster is created using this public/private key pair
SSH_PUBLIC_KEY_LOCATION = ~/.ssh/lsuciu-ass.pub
SSH_PRIVATE_KEY_LOCATION = ~/.ssh/lsuciu-ass
# the name of the quay secret used to pull images
DOCKER_SECRET_NAME = quay-registry-secret
K8S_DASHBOAR_URL = api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/overview?namespace=default
###########################################################################
# ************************ ACS RELATED INFORMATION ***********************
NAMESPACE = alfresco-s-ass3
# this EFS_SERVER is in the VPC of my (lsuciu-ass) cluster, KOPS_CLUSTER_NAME
# check out https://github.com/Alfresco/alfresco-dbp-deployment#6-efs-storage-note-only-for-aws and the README.md
EFS_SERVER = fs-78d82cb0.efs.eu-west-1.amazonaws.com

View File

@@ -1,16 +0,0 @@
controller:
config:
ssl-redirect: "false"
scope:
enabled: true
namespace: alfresco-s-ass3
publishService:
enabled: true
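# TLS terminates at the AWS ELB (certificate set via the annotations below); decrypted traffic is forwarded to port 80 on the controller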
service:
targetPorts:
https: 80
annotations:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: ""
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
external-dns.alpha.kubernetes.io/hostname: alfresco-s-ass.YourDNSZone
service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01"

View File

@@ -1,14 +0,0 @@
{
"Comment":"CNAME for my ACS deployment",
"Changes":[{
"Action": "CREATE",
"ResourceRecordSet":{
"Name": "lsuciu3.dev.alfresco.me",
"Type":"CNAME",
"TTL":30,
"ResourceRecords":[{
"Value": "a0c3ad6a9e80611e8a9f302ac589cc94-762417480.eu-west-1.elb.amazonaws.com"
}]
}
}]
}

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system

View File

@@ -0,0 +1,3 @@
.git
.dockerignore
Dockerfile

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: quay-registry-secret
type: kubernetes.io/dockerconfigjson
data:
# Docker registries config.json in base64; to generate it, run: cat ~/.docker/config.json | base64
# REMEMBER TO ADD A WORKING USER FOR THIS
.dockerconfigjson: ewoJImF1dGhzIjogewoCSJkb2NrZXItaW50ZXJuYWwuYWxmcmVzY28uY29tIjogewoJCQkiYXV0aCI6ICJiSE4xWTJsMU9rbHVaV1poWW1sc01USWpKQT09IgoJCX0sCgkJImh0dHBzOi8vZG9ja2VyLWludGVybmFsLmFsZnJlc2NvLmNvbSI6IHsKCQkJImF1dGgiOiAiYkhOMVkybDFPa2x1WldaaFltbHNNVElqSkE9PSIKCQl9LAoJCSJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CgkJCSJhdXRoIjogImJtVnpjM0p2T2tGdFltbGxiblJoYkRFd01BPT0iCgkJfSwKCQkiaHR0cHM6Ly9xdWF5LmlvIjogewoJCQkiYXV0aCI6ICJiSE4xWTJsMU9tRnBkWEpsWVRFeU13PT0iCgkJfSwKCQkicXVheS5pbyI6IHsKCQkJImF1dGgiOiAiYkhOMVkybDFPbUZwZFhKbFlURXlNdz09IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xNy4xMi4wLWNlIChkYXJ3aW4pIgoJfQp9
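# Alternatively (a sketch; credentials are placeholders), an equivalent secret can be generated with:
#   kubectl create secret docker-registry quay-registry-secret --docker-server=quay.io \
#     --docker-username=<user> --docker-password=<password> --docker-email=<email> \
#     --dry-run -o yaml > secrets/quay-registry-secret.yaml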