# docker-compose.yml — PETRIC leaderboard / data-browsing stack
---
services:
  # SIRF build with an NVIDIA GPU reservation; the command is a quick
  # smoke test that torch and tensorflow can each see the GPU.
  petric:
    build:
      context: .
      target: sirf
    image: ghcr.io/synerbi/sirf:petric2
    # quick test of installed libraries
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities:
                - gpu
    command:
      - python
      - -c
      - |
        import torch, tensorflow as tf
        print("torch:", torch.cuda.is_available(), "| tf:", tf.config.list_physical_devices("GPU"))
leaderboard:
networks: [reverse_proxy]
build: {context: ., target: leaderboard}
image: synerbi/tensorboard:latest
restart: always
expose: [6006]
command: "tensorboard --logdir=/logs --bind_all --window_title='PETRIC Leaderboard' --path_prefix /tensorboard"
volumes: ["/opt/runner/logs/1:/logs"]
healthcheck: {test: "wget --spider localhost:6006/tensorboard"}
labels:
virtual.host: petric.tomography.stfc.ac.uk
virtual.tls-email: [email protected]
# /tensorboard -> leaderboard:6006/tensorboard
virtual.port: 6006
virtual.proxy.matcher: /tensorboard*
# /data -> data:80
# /2/data -> data2:80
# /data-raw -> caddy:/share/petric
# /data-wip -> data-wip:80
# /leaderboard -> caddy:/share/petric/1/leaderboard.md
# /2/leaderboard -> caddy:/share/petric/2/leaderboard.md
# /2/tensorboard -> leaderboard2:6006/2/tensorboard
# / -> wiki
virtual.host.directives: |
handle /data-wip* {
import ext_auth
reverse_proxy http://data-wip
}
handle_path /data-raw* {
root * /share/petric
file_server browse
}
handle /data* {
reverse_proxy http://data
}
handle /2/data* {
reverse_proxy http://data2
}
handle /2/tensorboard* {
reverse_proxy http://leaderboard2:6006
}
redir / https://github.com/SyneRBI/PETRIC2/wiki
redir /leaderboard /leaderboard/
redir /data/files/leaderboard* /leaderboard/
redir /provisional* /leaderboard/
redir /data-wip /data-wip/
redir /data-raw /data-raw/
redir /data /data/
redir /2/data /2/data/
redir /2/leaderboard /2/leaderboard/
handle_path /leaderboard* {
root * /share/petric/1
handle / {
header Content-Type text/html
templates {
between "<<" ">>"
}
respond <<HTML
<!DOCTYPE html>
<html><head><title>PETRIC Leaderboard</title></head>
<body><<readFile "leaderboard.md" | markdown>></body></html>
HTML 200
}
file_server
}
handle_path /2/leaderboard* {
root * /share/petric/2
handle / {
header Content-Type text/html
templates {
between "<<" ">>"
}
respond <<HTML
<!DOCTYPE html>
<html><head><title>PETRIC2 Leaderboard</title></head>
<body><<readFile "leaderboard.md" | markdown>></body></html>
HTML 200
}
file_server
}
leaderboard2:
networks: [reverse_proxy]
build: {context: ., target: leaderboard}
image: synerbi/tensorboard:latest
restart: always
expose: [6006]
command: "tensorboard --logdir=/logs --bind_all --window_title='PETRIC Leaderboard' --path_prefix /2/tensorboard"
volumes: ["/opt/runner/logs/2:/logs"]
healthcheck: {test: "wget --spider localhost:6006/2/tensorboard"}
data:
networks: [reverse_proxy]
image: filebrowser/filebrowser:latest
restart: always
volumes: ["/mnt/share-public/petric/1:/srv:ro"]
user: "1000:1005" # ubuntu:runner
command: [--noauth, -b=/data, -l=stdout]
  # read-only file browser over the round-2 share, proxied at /2/data
  # (same setup as the `data` service, but rooted at .../petric/2)
  data2:
    networks: [reverse_proxy]
    image: filebrowser/filebrowser:latest
    restart: always
    volumes: ["/mnt/share-public/petric/2:/srv:ro"]
    user: "1000:1005" # ubuntu:runner
    command: [--noauth, -b=/2/data, -l=stdout]
data-wip:
networks: [reverse_proxy]
image: filebrowser/filebrowser:latest
restart: always
volumes: ["/mnt/share-public/petric/wip:/srv"]
user: "1000:1005" # ubuntu:runner
command: [--noauth, -b=/data-wip, -l=stdout]
networks:
  # pre-existing network shared with the reverse-proxy stack
  reverse_proxy:
    external: true