.PHONY: clean setup nvidia_setup verify env freeze test_environment train_lite sync_data_to_s3 sync_data_from_s3 sync_data_to_azure sync_data_from_azure sync_data_to_gcs sync_data_from_gcs

#################################################################################
# GLOBALS                                                                       #
#################################################################################

PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
BUCKET = mungana-public-assets
PROFILE = default
PROJECT_NAME = train-lowres-polyglot-llms
PYTHON_INTERPRETER = python3
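
# Any of the variables above can be overridden per invocation; a hypothetical
# example (python3.11 is a placeholder, not an interpreter pinned by this project):
#   make env PYTHON_INTERPRETER=python3.11
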
ifeq (,$(shell which conda))
HAS_CONDA=False
else
HAS_CONDA=True
endif

#################################################################################
# COMMANDS                                                                      #
#################################################################################

## Install NVIDIA Drivers and CUDA Toolkit
nvidia_setup:
	/bin/bash scripts/nvidia_setup.sh

## Install Python Dependencies + Other OS-level dependencies
setup:
	/bin/bash scripts/server_setup.sh

## Verify the environment is setup correctly
verify: test_environment
	$(PYTHON_INTERPRETER) scripts/check_gpu.py

## Delete all compiled Python files
clean:
	find . -type f -name "*.py[co]" -delete
	find . -type d -name "__pycache__" -delete

## Train a light-weight model for demonstration purposes
train_lite:
	@echo ">>> Training a light-weight model to demonstrate the training process."
	/bin/bash scripts/train_sp_tokenizer.sh \
		--input-texts-path demos/data/sample.bantu.ven.txt \
		--sampled-texts-path data/temp/tshivenda-tokenizer/0 \
		--seed 47 \
		--alpha 0.5 \
		--tokenizer-model-type unigram \
		--vocab-size 5000 \
		--tokenizer-output-dir data/tokenizers/tshivenda-xlmr-5k
	@echo ">>> Tokenizer ready, moving on to training the language model."
	/bin/bash scripts/train_masked_xlm.sh \
		--config configs/tshivenda-xlmr-base.yml \
		--training_data demos/data \
		--experiment_name tshivenda-xlmr-lite \
		--tokenizer_path data/tokenizers/tshivenda-xlmr-5k \
		--epochs 5
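
# Typical invocation is simply `make train_lite`. The recipe assumes the sample
# corpus at demos/data/sample.bantu.ven.txt, the config at
# configs/tshivenda-xlmr-base.yml, and both training scripts exist in the repo;
# the tokenizer step must succeed before the language-model step can run.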

## Upload Data to S3 - Requires AWS CLI to be configured
# Authentication guide:
sync_data_to_s3:
	dvc push -r s3
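
# The S3 targets assume a DVC remote named `s3` is already configured. A minimal,
# hypothetical setup (bucket/prefix not verified against this repo's .dvc/config):
#   dvc remote add s3 s3://mungana-public-assets/train-lowres-polyglot-llms
#   dvc remote modify s3 profile default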

## Download Data from S3
sync_data_from_s3:
	dvc pull -r s3

## Upload data to Azure Blob Storage
# Authentication guide:
sync_data_to_azure:
	dvc push -r az

## Download data from Azure Blob Storage
sync_data_from_azure:
	dvc pull -r az
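
# The Azure targets assume a DVC remote named `az`. A hypothetical setup
# (container and storage-account names below are placeholders):
#   dvc remote add az azure://<container>/train-lowres-polyglot-llms
#   dvc remote modify az account_name <storage-account>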

## Upload data to Google Cloud Storage
# Authentication guide:
sync_data_to_gcs:
	dvc push -r gcs

## Download data from Google Cloud Storage
sync_data_from_gcs:
	dvc pull -r gcs
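
# The GCS targets assume a DVC remote named `gcs`. A hypothetical setup
# (bucket name is a placeholder; credentials typically come from
# `gcloud auth application-default login`):
#   dvc remote add gcs gs://<bucket>/train-lowres-polyglot-llms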

## Set up python interpreter environment
env:
ifeq (True,$(HAS_CONDA))
	@echo ">>> Detected conda, creating conda environment."
	conda env create -f environment.yml
	@echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)"
else
	$(PYTHON_INTERPRETER) -m pip install -q virtualenv virtualenvwrapper
	@echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\
	export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n"
	@bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)"
	@echo ">>> New virtualenv created. Activate with:\nworkon $(PROJECT_NAME)"
endif

## Convert pyproject.toml to requirements.txt
freeze:
	poetry export -f requirements.txt --output requirements.txt --without-hashes
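
# Note: on recent Poetry releases (2.x), `poetry export` is provided by the
# separately installed poetry-plugin-export plugin, e.g.:
#   poetry self add poetry-plugin-export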

## Test python environment is setup correctly
test_environment:
	$(PYTHON_INTERPRETER) test_environment.py

#################################################################################
# PROJECT RULES                                                                 #
#################################################################################

#################################################################################
# Self Documenting Commands                                                     #
#################################################################################

.DEFAULT_GOAL := help
# Inspired by <http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html>
# sed script explained:
# /^##/:
# * save line in hold space
# * purge line
# * Loop:
# * append newline + line to hold space
# * go to next line
# * if line starts with doc comment, strip comment character off and loop
# * remove target prerequisites
# * append hold space (+ newline) to line
# * replace newline plus comments by `---`
# * print line
# Separate expressions are necessary because labels cannot be delimited by
# semicolon; see <http://stackoverflow.com/a/11799865/1968>

.PHONY: help
help:
	@echo "$$(tput bold)Available rules:$$(tput sgr0)"
	@echo
	@sed -n -e "/^## / { \
		h; \
		s/.*//; \
		:doc" \
		-e "H; \
		n; \
		s/^## //; \
		t doc" \
		-e "s/:.*//; \
		G; \
		s/\\n## /---/; \
		s/\\n/ /g; \
		p; \
	}" ${MAKEFILE_LIST} \
	| LC_ALL='C' sort --ignore-case \
	| awk -F '---' \
		-v ncol=$$(tput cols) \
		-v indent=19 \
		-v col_on="$$(tput setaf 6)" \
		-v col_off="$$(tput sgr0)" \
	'{ \
		printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
		n = split($$2, words, " "); \
		line_length = ncol - indent; \
		for (i = 1; i <= n; i++) { \
			line_length -= length(words[i]) + 1; \
			if (line_length <= 0) { \
				line_length = ncol - indent - length(words[i]) - 1; \
				printf "\n%*s ", -indent, " "; \
			} \
			printf "%s ", words[i]; \
		} \
		printf "\n"; \
	}' \
	| more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars')
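
# Illustrative output of `make help` (entries are generated from the `## `
# doc comments in this file; exact wrapping depends on terminal width):
#   clean               Delete all compiled Python files
#   env                 Set up python interpreter environment
#   freeze              Convert pyproject.toml to requirements.txt
#   ...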