SHELL := /bin/bash # Use bash syntax
GIT_REVISION = `git rev-parse --short HEAD``git diff --quiet HEAD -- || echo "-dirty"`
# create a default.env config file to avoid being prompted for these configs over and over
include default.env
DEPLOY_PROFILE ?= $(eval DEPLOY_PROFILE := $(shell bash -c 'read -p "Deploy Profile: " input; echo $$input'))$(DEPLOY_PROFILE)
BACKEND_PROFILE ?= $(eval BACKEND_PROFILE := $(shell bash -c 'read -p "Backend Profile: " input; echo $$input'))$(BACKEND_PROFILE)
STAGE ?= $(eval STAGE := $(shell bash -c 'read -p "Stage: " input; echo $$input'))$(STAGE)
REGION ?= $(eval REGION := $(shell bash -c 'read -p "Region: " input; echo $$input'))$(REGION)
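# Illustrative default.env presetting the prompts above (the profile names, stage
# and region below are placeholder values, not part of this repo):
#   DEPLOY_PROFILE=my-deploy-profile
#   BACKEND_PROFILE=my-backend-profile
#   STAGE=dev
#   REGION=us-east-1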
# use local install of terraform
terraform = AWS_PROFILE=${BACKEND_PROFILE} terraform
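# Note: terraform itself runs under BACKEND_PROFILE (presumably the profile holding
# the terraform state), while the resources are created with DEPLOY_PROFILE, which
# is passed to the stack as the `profile` variable below.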
## CONFIGURATION
check-dirty:
	@git diff --quiet HEAD -- || { echo "ERROR: commit first, or use 'make force-deploy' to deploy dirty"; exit 1; }

ask-run-target:
	@echo "Running with profile ${DEPLOY_PROFILE}..."

ask-deploy-target:
	@echo "Deploying ${GIT_REVISION} in ${STAGE} with profile ${DEPLOY_PROFILE}, backend profile ${BACKEND_PROFILE}..."
# login to ECR
docker-login:
	aws ecr get-login-password --region "${REGION}" --profile=${DEPLOY_PROFILE} | \
		docker login --username AWS --password-stdin \
		"$(shell aws sts get-caller-identity --profile=${DEPLOY_PROFILE} --query 'Account' --output text).dkr.ecr.${REGION}.amazonaws.com"
## BUILD CONTAINERS AND LAMBDAS
build:
	cd rust; cargo build
rust/target/docker/%.zip: $(shell find rust/src -type f) rust/Cargo.toml docker/Dockerfile
	mkdir -p ./rust/target/docker
	DOCKER_BUILDKIT=1 docker build \
		-f docker/Dockerfile \
		--build-arg BIN_NAME=$* \
		--target export-stage \
		--output ./rust/target/docker \
		.
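# For illustration: `make rust/target/docker/trigger.zip` matches this rule with
# $* = trigger, building that binary inside Docker and exporting the zip to
# rust/target/docker (the `trigger` name comes from package-lambdas below).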
# package the lambda functions
package-lambdas: rust/target/docker/trigger.zip
# create standalone (scheduler+executor) container
package-standalone:
	DOCKER_BUILDKIT=1 docker build \
		-t cloudfuse/ballista-standalone:${GIT_REVISION} \
		-f docker/Dockerfile \
		--build-arg BIN_NAME=standalone \
		--build-arg PORT=50050 \
		--target runtime-stage \
		.
# create executor container
package-executor:
	DOCKER_BUILDKIT=1 docker build \
		-t cloudfuse/ballista-executor:${GIT_REVISION} \
		-f docker/Dockerfile \
		--build-arg BIN_NAME=executor \
		--build-arg PORT=50051 \
		--target runtime-stage \
		.
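# Both runtime images are tagged with ${GIT_REVISION}; deploy-all passes the same
# revision to terraform (--var git_revision), presumably so the deployed stack
# references the matching image tags.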
## MANAGE AWS INFRASTRUCTURE
# init terraform stack
init:
	@cd infra; ${terraform} init
	@cd infra; ${terraform} workspace new ${STAGE} &>/dev/null || echo "${STAGE} already exists"
# destroy terraform stack
destroy: ask-deploy-target
	cd infra; ${terraform} destroy \
		--var profile=${DEPLOY_PROFILE} \
		--var region_name=${REGION}
# deploy/update the terraform stack; requires being logged in to ECR (see docker-login)
deploy-all: ask-deploy-target package-standalone package-executor package-lambdas
@echo "DEPLOYING ${GIT_REVISION} on ${STAGE}..."
@cd infra; ${terraform} workspace select ${STAGE}
@cd infra; ${terraform} apply \
--var profile=${DEPLOY_PROFILE} \
--var region_name=${REGION} \
--var git_revision=${GIT_REVISION}
@echo "${GIT_REVISION} DEPLOYED !!!"
## TPCH DATASET
# generate mock data for testing
generate-tpch-data:
	docker build \
		-t cloudfuse/ballista-tpchgen:v1 \
		-f docker/tpch/Dockerfile \
		.
	mkdir -p data
	docker run -v `pwd`/data:/data -it --rm cloudfuse/ballista-tpchgen:v1
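# The generated tables end up under ./data on the host (mounted as /data inside
# the tpchgen container).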
# copy mock data from S3 to EFS
copy-to-efs:
	aws lambda invoke \
		--function-name $(shell bash -c 'cd infra; ${terraform} output copy_data_lambda_name') \
		--log-type Tail \
		--region ${REGION} \
		--profile ${DEPLOY_PROFILE} \
		--query 'LogResult' \
		--output text \
		/dev/null | base64 -d
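# Note: with `--log-type Tail`, the invoke response's LogResult holds the last few
# KB of the function's logs base64-encoded, so piping through `base64 -d` prints
# them; run-integ-aws below uses the same pattern.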
## RUN QUERIES
# start the cluster locally with docker compose and run a query
run-integ-docker: ask-run-target
	COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker-compose -f docker/docker-compose.yml build
	COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 AWS_PROFILE=${DEPLOY_PROFILE} docker-compose -f docker/docker-compose.yml up --abort-on-container-exit
# call the trigger lambda to start the cluster and run a query
run-integ-aws: ask-run-target
	AWS_MAX_ATTEMPTS=1 aws lambda invoke \
		--function-name $(shell bash -c 'cd infra; ${terraform} output trigger_lambda_name') \
		--log-type Tail \
		--region ${REGION} \
		--profile ${DEPLOY_PROFILE} \
		--query 'LogResult' \
		--output text \
		/dev/null | base64 -d