forked from matter-labs/zksync-era
-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose-gpu-runner-cuda-12-0.yml
53 lines (52 loc) · 1.69 KB
/
docker-compose-gpu-runner-cuda-12-0.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
# Docker Compose stack for CUDA 12.0 GPU CI runners:
# a geth dev chain, the zk build environment (with NVIDIA devices forwarded),
# and a Postgres instance for tests.
version: '3.2'
services:
  geth:
    image: "matterlabs/geth:latest"
    environment:
      - PLUGIN_CONFIG
  zk:
    image: matterlabs/zk-environment:cuda-12-0-latest
    depends_on:
      - geth
      - postgres
    security_opt:
      - seccomp:unconfined
    # Keep the container alive so CI jobs can exec into it.
    command: tail -f /dev/null
    volumes:
      - .:/usr/src/zksync
      - /usr/src/cache:/usr/src/cache
      # Host Docker socket is mounted so the container can run docker-in-docker.
      - /var/run/docker.sock:/var/run/docker.sock
      - /usr/src/keys:/mnt/prover_setup_keys
    environment:
      - CACHE_DIR=/usr/src/cache
      - SCCACHE_CACHE_SIZE=50g
      - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
      - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
      - SCCACHE_ERROR_LOG=/tmp/sccache_log.txt
      - SCCACHE_GCS_RW_MODE=READ_WRITE
      - CI=1
      - GITHUB_WORKSPACE=$GITHUB_WORKSPACE
      # We set CUDAARCHS for L4 GPUs (compute capability 8.9).
      - CUDAARCHS=89
    # We need to forward all nvidia devices explicitly: due to a bug with cgroups
    # and nvidia-container-runtime
    # (https://github.com/NVIDIA/libnvidia-container/issues/176#issuecomment-1159454366),
    # cgroups are disabled and thus the GPU isn't properly forwarded to dind.
    devices:
      - /dev/nvidia0:/dev/nvidia0
      - /dev/nvidiactl:/dev/nvidiactl
      - /dev/nvidia-caps:/dev/nvidia-caps
      - /dev/nvidia-modeset:/dev/nvidia-modeset
      - /dev/nvidia-uvm:/dev/nvidia-uvm
      - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
    env_file:
      - ./.env
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [gpu]
  postgres:
    image: "postgres:14"
    command: postgres -c 'max_connections=200'
    ports:
      # Quoted to avoid YAML scalar-typing ambiguity in port mappings;
      # bound to loopback only so Postgres is not exposed on the runner network.
      - "127.0.0.1:5432:5432"
    environment:
      # NOTE(review): trust auth is only acceptable because the port above is
      # loopback-bound on an ephemeral CI runner.
      - POSTGRES_HOST_AUTH_METHOD=trust