Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2025-09-10 10:45:57 +03:00
Compare commits
502 Commits
.dockerignore

@@ -3,6 +3,9 @@ target
 
 # Data folder
 data
+.env
+.env.template
+.gitattributes
 
 # IDE files
 .vscode
@@ -10,5 +13,15 @@ data
 *.iml
 
 # Documentation
+.github
 *.md
+*.txt
+*.yml
+*.yaml
+
+# Docker folders
+hooks
+tools
+
+# Web vault
+web-vault
23 .editorconfig Normal file

@@ -0,0 +1,23 @@
+# EditorConfig is awesome: https://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+[*]
+end_of_line = lf
+charset = utf-8
+
+[*.{rs,py}]
+indent_style = space
+indent_size = 4
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[Makefile]
+indent_style = tab
142 .env.template

@@ -1,19 +1,34 @@
-## Bitwarden_RS Configuration File
+## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
+##
+## Be aware that most of these settings will be overridden if they were changed
+## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
 
 ## Main data folder
 # DATA_FOLDER=data
 
 ## Database URL
 ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
-## When using MySQL, this it is the URL to the DB, including username and password:
-## Format: mysql://[user[:password]@]host/database_name
 # DATABASE_URL=data/db.sqlite3
+## When using MySQL, specify an appropriate connection URI.
+## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
+# DATABASE_URL=mysql://user:password@host[:port]/database_name
+## When using PostgreSQL, specify an appropriate connection URI (recommended)
+## or keyword/value connection string.
+## Details:
+## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
+## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+# DATABASE_URL=postgresql://user:password@host[:port]/database_name
+
+## Database max connections
+## Define the size of the connection pool used for connecting to the database.
+# DATABASE_MAX_CONNS=10
 
 ## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
 # ATTACHMENTS_FOLDER=data/attachments
+# SENDS_FOLDER=data/sends
 
 ## Templates data folder, by default uses embedded templates
 ## Check source code to see the format
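Taken together, the new database options make the backend an explicit choice. A minimal sketch of a PostgreSQL-backed configuration, with placeholder host, credentials, and database name (not values from this diff):

    DATA_FOLDER=data
    DATABASE_URL=postgresql://vaultwarden:changeme@db.example.local:5432/vaultwarden
    DATABASE_MAX_CONNS=10

Only one DATABASE_URL should be set at a time; the SQLite path form remains the default.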
@@ -21,9 +36,9 @@
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
 
-## Client IP Header, used to identify the IP of the client, defaults to "X-Client-IP"
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
 ## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Client-IP
+# IP_HEADER=X-Real-IP
 
 ## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
 # ICON_CACHE_TTL=2592000
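The renamed header only helps if the reverse proxy actually sets it. A minimal sketch for nginx (assuming nginx sits in front of the server; the upstream address is a placeholder, and this snippet is not part of the diff):

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_pass http://127.0.0.1:8080;
    }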
@@ -41,9 +56,30 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012
 
+## Job scheduler settings
+##
+## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
+## and are always in terms of UTC time (regardless of your local time zone settings).
+##
+## How often (in ms) the job scheduler thread checks for jobs that need running.
+## Set to 0 to globally disable scheduled jobs.
+# JOB_POLL_INTERVAL_MS=30000
+##
+## Cron schedule of the job that checks for Sends past their deletion date.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# SEND_PURGE_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that checks for trashed items to delete permanently.
+## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
+# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
+
 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
+
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
 
 ## Logging to file
 ## It's recommended to also set 'ROCKET_CLI_COLORS=off'
 # LOG_FILE=/path/to/log
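The linked cron crate parses six whitespace-separated fields in the order second, minute, hour, day-of-month, month, day-of-week, so "0 5 * * * *" means second 0 of minute 5 of every hour, matching the stated hourly default. A hypothetical weekly variant (Mondays at 03:30 UTC, assuming named weekdays are accepted; a numeric day-of-week field works as well):

    TRASH_PURGE_SCHEDULE="0 30 3 * * Mon"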
@@ -56,18 +92,22 @@
 ## Log level
 ## Change the verbosity of the log output
 ## Valid values are "trace", "debug", "info", "warn", "error" and "off"
 ## Setting it to "trace" or "debug" would also show logs for mounted
 ## routes and static file, websocket and alive requests
 # LOG_LEVEL=Info
 
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents bitwarden_rs from automatically enabling it on start.
+## this setting only prevents vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
+
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
 
 ## Disable icon downloading
 ## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
 ## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
@@ -82,10 +122,11 @@
 ## Icon blacklist Regex
 ## Any domains or IPs that match this regex won't be fetched by the icon service.
 ## Useful to hide other servers in the local network. Check the WIKI for more details
-# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
+## NOTE: Always enclose this regex withing single quotes!
+# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
 
 ## Any IP which is not defined as a global IP will be blacklisted.
-## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
 # ICON_BLACKLIST_NON_GLOBAL_IPS=true
 
 ## Disable 2FA remember
@@ -93,6 +134,18 @@
 ## Note that the checkbox would still be present, but ignored.
 # DISABLE_2FA_REMEMBER=false
 
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+
+## Email token size
+## Number of digits in an email token (min: 6, max: 19).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true
 
@@ -114,6 +167,14 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
+
+## Controls which users can create new orgs.
+## Blank or 'all' means all users can create orgs (this is the default):
+# ORG_CREATION_USERS=
+## 'none' means no users can create orgs:
+# ORG_CREATION_USERS=none
+## A comma-separated list means only those users can create orgs:
+# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
 ## Token for the admin interface, preferably use a long random string
 ## One option is to use 'openssl rand -base64 48'
 ## If not set, the admin panel is disabled
@@ -125,6 +186,16 @@
 
 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
+## Name shown in the invitation emails that don't come from a specific organization
+# INVITATION_ORG_NAME=Vaultwarden
+
+## Per-organization attachment limit (KB)
+## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment limit (KB).
+## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+# USER_ATTACHMENT_LIMIT=
+
 
 ## Controls the PBBKDF password iterations to apply on the server
 ## The change only applies when the password is changed
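Both limits are plain kilobyte counts, so a 100 MB organization-wide cap would be expressed as (illustrative value only, 100 * 1024 KB):

    ORG_ATTACHMENT_LIMIT=102400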
@@ -140,6 +211,13 @@
 ## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
 # DOMAIN=https://bw.domain.tld:8443
+
+## Allowed iframe ancestors (Know the risks!)
+## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
+## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
+## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
+## Multiple values must be separated with a whitespace.
+# ALLOWED_IFRAME_ANCESTORS=
 
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
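Because multiple ancestors are whitespace-separated, permitting two hypothetical intranet hosts to frame the web vault would look like:

    ALLOWED_IFRAME_ANCESTORS=https://intranet.example.com https://wiki.example.com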
@@ -162,7 +240,7 @@
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
 ##
 ## According to the RFC6238 (https://tools.ietf.org/html/rfc6238),
 ## we allow by default the TOTP code which was valid one step back and one in the future.
 ## This can however allow attackers to be a bit more lucky with there attempts because there are 3 valid codes.
@@ -181,13 +259,47 @@
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
-# SMTP_FROM=bitwarden-rs@domain.tld
-# SMTP_FROM_NAME=Bitwarden_RS
-# SMTP_PORT=587
-# SMTP_SSL=true
+# SMTP_FROM=vaultwarden@domain.tld
+# SMTP_FROM_NAME=Vaultwarden
+# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
+# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
+# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
-# SMTP_AUTH_MECHANISM="Plain"
 # SMTP_TIMEOUT=15
+
+## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
+## Possible values: ["Plain", "Login", "Xoauth2"].
+## Multiple options need to be separated by a comma ','.
+# SMTP_AUTH_MECHANISM="Plain"
+
+## Server name sent during the SMTP HELO
+## By default this value should be is on the machine's hostname,
+## but might need to be changed in case it trips some anti-spam filters
+# HELO_NAME=
+
+## SMTP debugging
+## When set to true this will output very detailed SMTP messages.
+## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
+# SMTP_DEBUG=false
+
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+## Accept Invalid Certificates
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
+# SMTP_ACCEPT_INVALID_CERTS=false
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
 
 # vim: syntax=ini
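Per the comments above, the Implicit-TLS (port 465) path needs both flags enabled. A minimal sketch with placeholder host and credentials:

    SMTP_HOST=smtp.example.com
    SMTP_PORT=465
    SMTP_SSL=true
    SMTP_EXPLICIT_TLS=true
    SMTP_USERNAME=vaultwarden@example.com
    SMTP_PASSWORD=changeme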
3 .gitattributes vendored Normal file

@@ -0,0 +1,3 @@
+# Ignore vendored scripts in GitHub stats
+src/static/scripts/* linguist-vendored
+
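To verify the attribute is applied, git can report it for any path under the vendored directory (the file name below is hypothetical):

    git check-attr linguist-vendored -- src/static/scripts/example.js
    # expected output: src/static/scripts/example.js: linguist-vendored: set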
1 .github/FUNDING.yml vendored

@@ -1 +1,2 @@
 github: dani-garcia
+custom: ["https://paypal.me/DaniGG"]
33 .github/ISSUE_TEMPLATE.md vendored

@@ -1,33 +0,0 @@
-<!--
-Please fill out the following template to make solving your problem easier and faster for us.
-This is only a guideline. If you think that parts are unneccessary for your issue, feel free to remove them.
-
-Remember to hide/obfuscate personal and confidential information,
-such as names, global IP/DNS adresses and especially passwords, if neccessary.
--->
-
-### Subject of the issue
-<!-- Describe your issue here.-->
-
-### Your environment
-<!-- The version number, obtained from the logs or the admin page -->
-* Bitwarden_rs version:
-<!-- How the server was installed: Docker image / package / built from source -->
-* Install method:
-* Clients used: <!-- if applicable -->
-* Reverse proxy and version: <!-- if applicable -->
-* Version of mysql/postgresql: <!-- if applicable -->
-* Other relevant information:
-
-### Steps to reproduce
-<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start bitwarden_rs? -->
-
-### Expected behaviour
-<!-- Tell us what should happen -->
-
-### Actual behaviour
-<!-- Tell us what happens instead -->
-
-### Relevant logs
-<!-- Share some logfiles, screenshots or output of relevant programs with us. -->
66 .github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@@ -0,0 +1,66 @@
+---
+name: Bug report
+about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
+title: ''
+labels: ''
+assignees: ''
+
+---
+<!--
+# ###
+NOTE: Please update to the latest version of vaultwarden before reporting an issue!
+This saves you and us a lot of time and troubleshooting.
+See:
+* https://github.com/dani-garcia/vaultwarden/issues/1180
+* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
+# ###
+-->
+
+<!--
+Please fill out the following template to make solving your problem easier and faster for us.
+This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
+
+Remember to hide/redact personal or confidential information,
+such as passwords, IP addresses, and DNS names as appropriate.
+-->
+
+### Subject of the issue
+<!-- Describe your issue here. -->
+
+### Deployment environment
+
+<!--
+=========================================================================================
+Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
+That will auto-generate most of the info requested in this section.
+=========================================================================================
+-->
+
+<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
+<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
+<!-- Remember to check if your issue exists on the latest version first! -->
+* vaultwarden version:
+
+<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
+* Install method:
+
+* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
+
+* Reverse proxy and version: <!-- if applicable -->
+
+* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
+
+* Other relevant details:
+
+### Steps to reproduce
+<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
+and how did you start vaultwarden? -->
+
+### Expected behaviour
+<!-- Tell us what you expected to happen -->
+
+### Actual behaviour
+<!-- Tell us what actually happened -->
+
+### Troubleshooting data
+<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
8 .github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Discourse forum for bitwarden_rs
+    url: https://bitwardenrs.discourse.group/
+    about: Use this forum to request features or get help with usage/configuration.
+  - name: GitHub Discussions for vaultwarden
+    url: https://github.com/dani-garcia/vaultwarden/discussions
+    about: An alternative to the Discourse forum, if this is easier for you.
151 .github/workflows/build.yml vendored Normal file

@@ -0,0 +1,151 @@
+name: Build
+
+on:
+  push:
+  pull_request:
+    # Ignore when there are only changes done too one of these paths
+    paths-ignore:
+      - "**.md"
+      - "**.txt"
+      - ".dockerignore"
+      - ".env.template"
+      - ".gitattributes"
+      - ".gitignore"
+      - "azure-pipelines.yml"
+      - "docker/**"
+      - "hooks/**"
+      - "tools/**"
+      - ".github/FUNDING.yml"
+      - ".github/ISSUE_TEMPLATE/**"
+
+jobs:
+  build:
+    strategy:
+      fail-fast: false
+      matrix:
+        channel:
+          - nightly
+          # - stable
+        target-triple:
+          - x86_64-unknown-linux-gnu
+          # - x86_64-unknown-linux-musl
+        include:
+          - target-triple: x86_64-unknown-linux-gnu
+            host-triple: x86_64-unknown-linux-gnu
+            features: "sqlite,mysql,postgresql"
+            channel: nightly
+            os: ubuntu-18.04
+            ext:
+          # - target-triple: x86_64-unknown-linux-gnu
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,mysql,postgresql"
+          #   channel: stable
+          #   os: ubuntu-18.04
+          #   ext:
+          # - target-triple: x86_64-unknown-linux-musl
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,postgresql"
+          #   channel: nightly
+          #   os: ubuntu-18.04
+          #   ext:
+          # - target-triple: x86_64-unknown-linux-musl
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,postgresql"
+          #   channel: stable
+          #   os: ubuntu-18.04
+          #   ext:
+
+    name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
+    runs-on: ${{ matrix.os }}
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@v2
+      # End Checkout the repo
+
+
+      # Install musl-tools when needed
+      - name: Install musl tools
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
+        if: matrix.target-triple == 'x86_64-unknown-linux-musl'
+      # End Install musl-tools when needed
+
+
+      # Install dependencies
+      - name: Install dependencies Ubuntu
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
+        if: startsWith( matrix.os, 'ubuntu' )
+      # End Install dependencies
+
+
+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@v1
+      # End Enable Rust Caching
+
+
+      # Uses the rust-toolchain file to determine version
+      - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          target: ${{ matrix.target-triple }}
+          components: clippy, rustfmt
+      # End Uses the rust-toolchain file to determine version
+
+
+      # Run cargo tests (In release mode to speed up future builds)
+      - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+      # End Run cargo tests
+
+
+      # Run cargo clippy (In release mode to speed up future builds)
+      - name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+      # End Run cargo clippy
+
+
+      # Run cargo fmt
+      - name: '`cargo fmt`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: fmt
+          args: --all -- --check
+      # End Run cargo fmt
+
+
+      # Build the binary
+      - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+      # End Build the binary
+
+
+      # Upload artifact to Github Actions
+      - name: Upload artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+          path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
+      # End Upload artifact to Github Actions
+
+
+      ## This is not used at the moment
+      ## We could start using this when we can build static binaries
+      # Upload to github actions release
+      # - name: Release
+      #   uses: Shopify/upload-to-release@1
+      #   if: startsWith(github.ref, 'refs/tags/')
+      #   with:
+      #     name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+      #     path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
+      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
+      # End Upload to github actions release
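The enabled matrix cell reduces to a handful of cargo invocations; reproducing the workflow locally (assuming the nightly toolchain and the system packages installed above) amounts to:

    cargo test   --release --features sqlite,mysql,postgresql --target x86_64-unknown-linux-gnu
    cargo clippy --release --features sqlite,mysql,postgresql --target x86_64-unknown-linux-gnu
    cargo fmt --all -- --check
    cargo build  --release --features sqlite,mysql,postgresql --target x86_64-unknown-linux-gnu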
35 .github/workflows/hadolint.yml vendored Normal file

@@ -0,0 +1,35 @@
+name: Hadolint
+
+on:
+  push:
+  pull_request:
+    # Ignore when there are only changes done too one of these paths
+    paths:
+      - "docker/**"
+
+jobs:
+  hadolint:
+    name: Validate Dockerfile syntax
+    runs-on: ubuntu-20.04
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@v2
+      # End Checkout the repo
+
+
+      # Download hadolint
+      - name: Download hadolint
+        shell: bash
+        run: |
+          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
+          sudo chmod +x /usr/local/bin/hadolint
+        env:
+          HADOLINT_VERSION: 2.0.0
+      # End Download hadolint
+
+      # Test Dockerfiles
+      - name: Run hadolint
+        shell: bash
+        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
+      # End Test Dockerfiles
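The same lint can be run outside CI by mirroring the workflow's own commands (hadolint 2.0.0, as pinned above):

    HADOLINT_VERSION=2.0.0
    sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
    sudo chmod +x /usr/local/bin/hadolint
    git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint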
70 .github/workflows/rust-win.yml.disabled vendored

@@ -1,70 +0,0 @@
-name: build-windows
-
-on: [push, pull_request]
-
-jobs:
-  build:
-
-    runs-on: windows-latest
-
-    strategy:
-      matrix:
-        db-backend: [sqlite, mysql, postgresql]
-
-    steps:
-    - uses: actions/checkout@v1
-
-    - name: Cache choco cache
-      uses: actions/cache@v1.0.3
-      with:
-        path: ~\AppData\Local\Temp\chocolatey
-        key: ${{ runner.os }}-choco-cache
-
-    - name: Install dependencies
-      run: choco install openssl sqlite postgresql12 mysql
-
-    - name: Cache cargo registry
-      uses: actions/cache@v1.0.3
-      with:
-        path: ~/.cargo/registry
-        key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
-    - name: Cache cargo index
-      uses: actions/cache@v1.0.3
-      with:
-        path: ~/.cargo/git
-        key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
-    - name: Cache cargo build
-      uses: actions/cache@v1.0.3
-      with:
-        path: target
-        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
-
-    - name: Install latest nightly
-      uses: actions-rs/toolchain@v1
-      with:
-        toolchain: nightly
-        override: true
-        profile: minimal
-        target: x86_64-pc-windows-msvc
-
-    - name: Build
-      run: cargo.exe build --verbose --features ${{ matrix.db-backend }} --release --target x86_64-pc-windows-msvc
-      env:
-        OPENSSL_DIR: C:\Program Files\OpenSSL-Win64\
-
-    - name: Run tests
-      run: cargo test --features ${{ matrix.db-backend }}
-
-    - name: Upload windows artifact
-      uses: actions/upload-artifact@v1.0.0
-      with:
-        name: x86_64-pc-windows-msvc-${{ matrix.db-backend }}-bitwarden_rs
-        path: target/release/bitwarden_rs.exe
-
-    - name: Release
-      uses: Shopify/upload-to-release@1.0.0
-      if: startsWith(github.ref, 'refs/tags/')
-      with:
-        name: x86_64-pc-windows-msvc-${{ matrix.db-backend }}-bitwarden_rs
-        path: target/release/bitwarden_rs.exe
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
149 .github/workflows/workspace.yml vendored

@@ -1,149 +0,0 @@
-name: Workflow
-
-on:
-  push:
-    paths-ignore:
-      - "**.md"
-  pull_request:
-    paths-ignore:
-      - "**.md"
-
-jobs:
-  build:
-    name: Build
-    strategy:
-      fail-fast: false
-      matrix:
-        db-backend: [sqlite, mysql, postgresql]
-        target:
-          - x86_64-unknown-linux-gnu
-          # - x86_64-unknown-linux-musl
-          - x86_64-apple-darwin
-          # - x86_64-pc-windows-msvc
-        include:
-          - target: x86_64-unknown-linux-gnu
-            os: ubuntu-latest
-            ext:
-          # - target: x86_64-unknown-linux-musl
-          #   os: ubuntu-latest
-          #   ext:
-          - target: x86_64-apple-darwin
-            os: macOS-latest
-            ext:
-          # - target: x86_64-pc-windows-msvc
-          #   os: windows-latest
-          #   ext: .exe
-    runs-on: ${{ matrix.os }}
-    steps:
-    - uses: actions/checkout@v1
-
-    # - name: Cache choco cache
-    #   uses: actions/cache@v1.0.3
-    #   if: matrix.os == 'windows-latest'
-    #   with:
-    #     path: ~\AppData\Local\Temp\chocolatey
-    #     key: ${{ runner.os }}-choco-cache-${{ matrix.db-backend }}
-
-    - name: Cache vcpkg installed
-      uses: actions/cache@v1.0.3
-      if: matrix.os == 'windows-latest'
-      with:
-        path: $VCPKG_ROOT/installed
-        key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
-      env:
-        VCPKG_ROOT: 'C:\vcpkg'
-
-    - name: Cache vcpkg downloads
-      uses: actions/cache@v1.0.3
-      if: matrix.os == 'windows-latest'
-      with:
-        path: $VCPKG_ROOT/downloads
-        key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
-      env:
-        VCPKG_ROOT: 'C:\vcpkg'
-
-    # - name: Cache homebrew
-    #   uses: actions/cache@v1.0.3
-    #   if: matrix.os == 'macOS-latest'
-    #   with:
-    #     path: ~/Library/Caches/Homebrew
-    #     key: ${{ runner.os }}-brew-cache
-
-    # - name: Cache apt
-    #   uses: actions/cache@v1.0.3
-    #   if: matrix.os == 'ubuntu-latest'
-    #   with:
-    #     path: /var/cache/apt/archives
-    #     key: ${{ runner.os }}-apt-cache
-
-    # Install dependencies
-    - name: Install dependencies macOS
-      run: brew update; brew install openssl sqlite libpq mysql
-      if: matrix.os == 'macOS-latest'
-
-    - name: Install dependencies Ubuntu
-      run: sudo apt-get update && sudo apt-get install --no-install-recommends openssl sqlite libpq-dev libmysql++-dev
-      if: matrix.os == 'ubuntu-latest'
-
-    - name: Install dependencies Windows
-      run: vcpkg integrate install; vcpkg install sqlite3:x64-windows openssl:x64-windows libpq:x64-windows libmysql:x64-windows
-      if: matrix.os == 'windows-latest'
-      env:
-        VCPKG_ROOT: 'C:\vcpkg'
-    # End Install dependencies
-
-    # Install rust nightly toolchain
-    - name: Cache cargo registry
-      uses: actions/cache@v1.0.3
-      with:
-        path: ~/.cargo/registry
-        key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
-    - name: Cache cargo index
-      uses: actions/cache@v1.0.3
-      with:
-        path: ~/.cargo/git
-        key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
-    - name: Cache cargo build
-      uses: actions/cache@v1.0.3
-      with:
-        path: target
-        key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
-
-    - name: Install latest nightly
-      uses: actions-rs/toolchain@v1
-      with:
-        toolchain: nightly
-        override: true
-        profile: minimal
-        target: ${{ matrix.target }}
-
-    # Build
-    - name: Build Win
-      if: matrix.os == 'windows-latest'
-      run: cargo.exe build --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
-      env:
-        RUSTFLAGS: -Ctarget-feature=+crt-static
-        VCPKG_ROOT: 'C:\vcpkg'
-
-    - name: Build macOS / Ubuntu
-      if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
-      run: cargo build --verbose --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
-
-    # Test
-    - name: Run tests
-      run: cargo test --features ${{ matrix.db-backend }}
-
-    # Upload & Release
-    - name: Upload artifact
-      uses: actions/upload-artifact@v1.0.0
-      with:
-        name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
-        path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
-
-    - name: Release
-      uses: Shopify/upload-to-release@1.0.0
-      if: startsWith(github.ref, 'refs/tags/')
-      with:
-        name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
-        path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
21 .travis.yml

@@ -1,21 +0,0 @@
-dist: xenial
-
-env:
-  global:
-    - HADOLINT_VERSION=1.17.1
-
-language: rust
-rust: nightly
-cache: cargo
-
-before_install:
-  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
-  - sudo chmod +rx /usr/local/bin/hadolint
-  - rustup set profile minimal
-
-# Nothing to install
-install: true
-script:
-  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
-  - cargo test --features "sqlite"
-  - cargo test --features "mysql"
3515 Cargo.lock generated

File diff suppressed because it is too large
116 Cargo.toml

@@ -1,10 +1,10 @@
 [package]
-name = "bitwarden_rs"
+name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2018"
 
-repository = "https://github.com/dani-garcia/bitwarden_rs"
+repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
 license = "GPL-3.0-only"
 publish = false
@@ -14,8 +14,14 @@ build = "build.rs"
 # Empty to keep compatibility, prefer to set USE_SYSLOG=true
 enable_syslog = []
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
-postgresql = ["diesel/postgres", "diesel_migrations/postgres", "openssl"]
+postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
 sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+# Enable to use a vendored and statically linked openssl
+vendored_openssl = ["openssl/vendored"]
+
+# Enable unstable features, requires nightly
+# Currently only used to enable rusts official ip support
+unstable = []
 
 [target."cfg(not(windows))".dependencies]
 syslog = "4.0.1"
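The new vendored_openssl feature is opted into at build time alongside a database backend; for example (the feature combination is illustrative, feature names are from the diff above):

    cargo build --release --features sqlite,vendored_openssl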
@@ -26,101 +32,117 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
|
|||||||
rocket_contrib = "0.5.0-dev"
|
rocket_contrib = "0.5.0-dev"
|
||||||
|
|
||||||
# HTTP client
|
# HTTP client
|
||||||
reqwest = "0.9.24"
|
reqwest = { version = "0.11.3", features = ["blocking", "json", "gzip", "brotli", "socks"] }
|
||||||
|
|
||||||
# multipart/form-data support
|
# multipart/form-data support
|
||||||
multipart = { version = "0.16.1", features = ["server"], default-features = false }
|
multipart = { version = "0.17.1", features = ["server"], default-features = false }
|
||||||
|
|
||||||
# WebSockets library
|
# WebSockets library
|
||||||
ws = "0.9.1"
|
ws = { version = "0.10.0", package = "parity-ws" }
|
||||||
|
|
||||||
# MessagePack library
|
# MessagePack library
|
||||||
rmpv = "0.4.3"
|
rmpv = "0.4.7"
|
||||||
|
|
||||||
# Concurrent hashmap implementation
|
# Concurrent hashmap implementation
|
||||||
chashmap = "2.2.2"
|
chashmap = "2.2.2"
|
||||||
|
|
||||||
# A generic serialization/deserialization framework
|
# A generic serialization/deserialization framework
|
||||||
serde = "1.0.104"
|
serde = { version = "1.0.125", features = ["derive"] }
|
||||||
serde_derive = "1.0.104"
|
serde_json = "1.0.64"
|
||||||
serde_json = "1.0.44"
|
|
||||||
|
|
||||||
# Logging
|
# Logging
|
||||||
log = "0.4.8"
|
log = "0.4.14"
|
||||||
fern = { version = "0.5.9", features = ["syslog-4"] }
|
 fern = { version = "0.6.0", features = ["syslog-4"] }
 
 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.3", features = [ "chrono", "r2d2"] }
+diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
 diesel_migrations = "1.4.0"
 
 # Bundled SQLite
-libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }
 
-# Crypto library
-ring = "0.14.6"
+# Crypto-related libraries
+rand = "0.8.3"
+ring = "0.16.20"
 
 # UUID generation
-uuid = { version = "0.8.1", features = ["v4"] }
+uuid = { version = "0.8.2", features = ["v4"] }
 
-# Date and time library for Rust
-chrono = "0.4.10"
+# Date and time libraries
+chrono = { version = "0.4.19", features = ["serde"] }
+chrono-tz = "0.5.3"
+time = "0.2.26"
 
+# Job scheduler
+job_scheduler = "1.2.1"
 
 # TOTP library
 oath = "0.10.2"
 
 # Data encoding library
-data-encoding = "2.1.2"
+data-encoding = "2.3.2"
 
 # JWT library
-jsonwebtoken = "6.0.1"
+jsonwebtoken = "7.2.0"
 
 # U2F library
-u2f = "0.1.6"
+u2f = "0.2.0"
 
 # Yubico Library
-yubico = { version = "0.7.1", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
 
 # A `dotenv` implementation for Rust
 dotenv = { version = "0.15.0", default-features = false }
 
-# Lazy static macro
-lazy_static = "1.4.0"
+# Lazy initialization
+once_cell = "1.7.2"
 
-# More derives
-derive_more = "0.99.2"
-
 # Numerical libraries
-num-traits = "0.2.10"
-num-derive = "0.3.0"
+num-traits = "0.2.14"
+num-derive = "0.3.3"
 
 # Email libraries
-lettre = "0.9.2"
-lettre_email = "0.9.2"
-native-tls = "0.2.3"
-quoted_printable = "0.4.1"
+tracing = { version = "0.1.25", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
+lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
+newline-converter = "0.2.0"
 
 # Template library
-handlebars = "=2.0.2"
+handlebars = { version = "3.5.4", features = ["dir_source"] }
 
 # For favicon extraction from main website
-soup = "0.4.1"
-regex = "1.3.1"
+html5ever = "0.25.1"
+markup5ever_rcdom = "0.1.0"
+regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
 data-url = "0.1.0"
 
-# Required for SSL support for PostgreSQL
-openssl = { version = "0.10.26", optional = true }
+# Used by U2F, JWT and Postgres
+openssl = "0.10.34"
 
 # URL encoding library
 percent-encoding = "2.1.0"
 
+# Punycode conversion
+idna = "0.2.2"
 
+# CLI argument parsing
+pico-args = "0.4.0"
 
+# Logging panics to logfile instead stderr only
+backtrace = "0.3.56"
 
+# Macro ident concatenation
+paste = "1.0.5"
 
 [patch.crates-io]
 # Use newest ring
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
-rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
+rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
 
-# Use git version for timeout fix #706
-lettre = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
-lettre_email = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
 
 # For favicon extraction from main website
-data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' }
+data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
 
+# The maintainer of the `job_scheduler` crate doesn't seem to have responded
+# to any issues or PRs for almost a year (as of April 2021). This hopefully
+# temporary fork updates Cargo.toml to use more up-to-date dependencies.
+# In particular, `cron` has since implemented parsing of some common syntax
+# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
+job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
@@ -1 +1 @@
-docker/amd64/sqlite/Dockerfile
+docker/amd64/Dockerfile
README.md
@@ -1,19 +1,18 @@
-### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
+### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
 
 ---
 
-[](https://travis-ci.org/dani-garcia/bitwarden_rs)
-[](https://hub.docker.com/r/bitwardenrs/server)
-[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
-[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
-[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
-[](https://matrix.to/#/#bitwarden_rs:matrix.org)
+[](https://hub.docker.com/r/vaultwarden/server)
+[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
+[](https://github.com/dani-garcia/vaultwarden/releases/latest)
+[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
+[](https://matrix.to/#/#vaultwarden:matrix.org)
 
-Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
+Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
 
 **This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
 
-#### ⚠️**IMPORTANT**⚠️: When using this server, please report any Bitwarden related bug-reports or suggestions [here](https://github.com/dani-garcia/bitwarden_rs/issues/new), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
+#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
 
 ---
 
@@ -21,40 +20,69 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
 
 Basically full implementation of Bitwarden API is provided including:
 
-* Basic single user functionality
 * Organizations support
 * Attachments
 * Vault API support
 * Serving the static files for Vault interface
 * Website icons API
 * Authenticator and U2F support
-* YubiKey OTP
+* YubiKey and Duo support
 
 ## Installation
 Pull the docker image and mount a volume from the host for persistent storage:
 
 ```sh
-docker pull bitwardenrs/server:latest
-docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
+docker pull vaultwarden/server:latest
+docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
 ```
 This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.
 
 **IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
 
-This can be configured in [bitwarden_rs directly](https://github.com/dani-garcia/bitwarden_rs/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/bitwarden_rs/wiki/Proxy-examples)).
+This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
 
 If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
 
 ## Usage
-See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
+See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.
 
 ## Get in touch
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).
 
-To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine. Please also report any bugs spotted here.
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!
 
-If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
+If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
 
 ### Sponsors
 Thanks for your contribution to the project!
 
-- [@Skaronator](https://github.com/Skaronator)
+<table>
+  <tr>
+    <td align="center">
+      <a href="https://github.com/netdadaltd">
+        <img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
+        <br />
+        <sub><b>netDada Ltd.</b></sub>
+      </a>
+    </td>
+  </tr>
+</table>
+
+<br/>
+
+<table>
+  <tr>
+    <td align="center">
+      <a href="https://github.com/ChonoN" style="width: 75px">
+        <sub><b>ChonoN</b></sub>
+      </a>
+    </td>
+  </tr>
+  <tr>
+    <td align="center">
+      <a href="https://github.com/themightychris">
+        <sub><b>themightychris</b></sub>
+      </a>
+    </td>
+  </tr>
+</table>
@@ -1,5 +1,5 @@
 pool:
-  vmImage: 'Ubuntu-16.04'
+  vmImage: 'Ubuntu-18.04'
 
 steps:
 - script: |
@@ -10,16 +10,13 @@ steps:
 
 - script: |
     sudo apt-get update
-    sudo apt-get install -y libmysql++-dev
-  displayName: Install libmysql
+    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
+  displayName: 'Install build libraries.'
 
 - script: |
     rustc -Vv
     cargo -V
   displayName: Query rust and cargo versions
 
-- script : cargo test --features "sqlite"
-  displayName: 'Test project with sqlite backend'
-
-- script : cargo test --features "mysql"
-  displayName: 'Test project with mysql backend'
+- script : cargo test --features "sqlite,mysql,postgresql"
+  displayName: 'Test project with sqlite, mysql and postgresql backends'
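The consolidated test step can be reproduced locally with essentially the same commands the updated pipeline runs (Debian/Ubuntu package names, as in the diff):

```sh
# Native libraries that the mysql and postgresql backends link against.
sudo apt-get update
sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf

# One test run now covers all three database backends.
cargo test --features "sqlite,mysql,postgresql"
```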
build.rs
@@ -1,17 +1,26 @@
+use std::env;
 use std::process::Command;
 
 fn main() {
-    #[cfg(all(feature = "sqlite", feature = "mysql"))]
-    compile_error!("Can't enable both sqlite and mysql at the same time");
-    #[cfg(all(feature = "sqlite", feature = "postgresql"))]
-    compile_error!("Can't enable both sqlite and postgresql at the same time");
-    #[cfg(all(feature = "mysql", feature = "postgresql"))]
-    compile_error!("Can't enable both mysql and postgresql at the same time");
+    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
+    #[cfg(feature = "sqlite")]
+    println!("cargo:rustc-cfg=sqlite");
+    #[cfg(feature = "mysql")]
+    println!("cargo:rustc-cfg=mysql");
+    #[cfg(feature = "postgresql")]
+    println!("cargo:rustc-cfg=postgresql");
 
     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
-    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
+    compile_error!(
+        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
+    );
 
-    read_git_info().ok();
+    if let Ok(version) = env::var("BWRS_VERSION") {
+        println!("cargo:rustc-env=BWRS_VERSION={}", version);
+        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
+    } else {
+        read_git_info().ok();
+    }
 }
 
 fn run(args: &[&str]) -> Result<String, std::io::Error> {
@@ -49,19 +58,21 @@ fn read_git_info() -> Result<(), std::io::Error> {
     // Combined version
     let version = if let Some(exact) = exact_tag {
         exact
-    } else if &branch != "master" {
+    } else if &branch != "main" && &branch != "master" {
         format!("{}-{} ({})", last_tag, rev_short, branch)
     } else {
         format!("{}-{}", last_tag, rev_short)
     };
-    println!("cargo:rustc-env=GIT_VERSION={}", version);
+    println!("cargo:rustc-env=BWRS_VERSION={}", version);
+    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
 
     // To access these values, use:
     //   env!("GIT_EXACT_TAG")
     //   env!("GIT_LAST_TAG")
     //   env!("GIT_BRANCH")
     //   env!("GIT_REV")
-    //   env!("GIT_VERSION")
+    //   env!("BWRS_VERSION")
 
     Ok(())
 }
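The new `BWRS_VERSION` branch lets the version string be injected when building outside a git checkout, for example from a release tarball; a minimal sketch, with an illustrative version number:

```sh
# When BWRS_VERSION is set, build.rs skips read_git_info() and embeds the
# given value as both BWRS_VERSION and CARGO_PKG_VERSION.
BWRS_VERSION=1.21.0 cargo build --features sqlite --release
```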
docker/Dockerfile.buildx (new file)
@@ -0,0 +1,33 @@
+# The cross-built images have the build arch (`amd64`) embedded in the image
+# manifest, rather than the target arch. For example:
+#
+# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
+# amd64
+#
+# Recent versions of Docker have started printing a warning when the image's
+# claimed arch doesn't match the host arch. For example:
+#
+# WARNING: The requested image's platform (linux/amd64) does not match the
+# detected host platform (linux/arm/v7) and no specific platform was requested
+#
+# The image still works fine, but the spurious warning creates confusion.
+#
+# Docker doesn't seem to provide a way to directly set the arch of an image
+# at build time. To resolve the build vs. target arch discrepancy, we use
+# Docker Buildx to build a new set of images with the correct target arch.
+#
+# Docker Buildx uses this Dockerfile to build an image for each requested
+# platform. Since the Dockerfile basically consists of a single `FROM`
+# instruction, we're effectively telling Buildx to build a platform-specific
+# image by simply copying the existing cross-built image and setting the
+# correct target arch as a side effect.
+#
+# References:
+#
+# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
+# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
+# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
+#
+ARG LOCAL_REPO
+ARG DOCKER_TAG
+FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
docker/Dockerfile.j2 (new file)
@@ -0,0 +1,265 @@
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+{% set build_stage_base_image = "rust:1.51" %}
+{% if "alpine" in target_file %}
+{% if "amd64" in target_file %}
+{% set build_stage_base_image = "clux/muslrust:nightly-2021-04-14" %}
+{% set runtime_stage_base_image = "alpine:3.13" %}
+{% set package_arch_target = "x86_64-unknown-linux-musl" %}
+{% elif "armv7" in target_file %}
+{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
+{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
+{% endif %}
+{% elif "amd64" in target_file %}
+{% set runtime_stage_base_image = "debian:buster-slim" %}
+{% elif "arm64" in target_file %}
+{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
+{% set package_arch_name = "arm64" %}
+{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
+{% set package_cross_compiler = "aarch64-linux-gnu" %}
+{% elif "armv6" in target_file %}
+{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
+{% set package_arch_name = "armel" %}
+{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
+{% set package_cross_compiler = "arm-linux-gnueabi" %}
+{% elif "armv7" in target_file %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
+{% set package_arch_name = "armhf" %}
+{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
+{% set package_cross_compiler = "arm-linux-gnueabihf" %}
+{% endif %}
+{% if package_arch_name is defined %}
+{% set package_arch_prefix = ":" + package_arch_name %}
+{% else %}
+{% set package_arch_prefix = "" %}
+{% endif %}
+{% if package_arch_target is defined %}
+{% set package_arch_target_param = " --target=" + package_arch_target %}
+{% else %}
+{% set package_arch_target_param = "" %}
+{% endif %}
+# Using multistage build:
+# https://docs.docker.com/develop/develop-images/multistage-build/
+# https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+{% set vault_version = "2.19.0d" %}
+{% set vault_image_digest = "sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233" %}
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v{{ vault_version }}
+#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
+#     [vaultwarden/web-vault@{{ vault_image_digest }}]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
+#     [vaultwarden/web-vault:v{{ vault_version }}]
+#
+FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
+
+########################## BUILD IMAGE ##########################
+FROM {{ build_stage_base_image }} as build
+
+{% if "alpine" in target_file %}
+{% if "amd64" in target_file %}
+# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
+ARG DB=sqlite,postgresql
+{% set features = "sqlite,postgresql" %}
+{% else %}
+# Alpine-based ARM (musl) only supports sqlite during compile time.
+ARG DB=sqlite
+{% set features = "sqlite" %}
+{% endif %}
+{% else %}
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
+{% set features = "sqlite,mysql,postgresql" %}
+{% endif %}
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+
+# Don't download rust docs
+RUN rustup set profile minimal
+
+{% if "alpine" in target_file %}
+ENV USER "root"
+ENV RUSTFLAGS='-C link-arg=-s'
+{% if "armv7" in target_file %}
+ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
+{% endif %}
+{% elif "arm" in target_file %}
+# Install required build libs for {{ package_arch_name }} architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
+RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
+    /etc/apt/sources.list.d/deb-src.list \
+    && dpkg --add-architecture {{ package_arch_name }} \
+    && apt-get update \
+    && apt-get install -y \
+    --no-install-recommends \
+    libssl-dev{{ package_arch_prefix }} \
+    libc6-dev{{ package_arch_prefix }} \
+    libpq5{{ package_arch_prefix }} \
+    libpq-dev \
+    libmariadb-dev{{ package_arch_prefix }} \
+    libmariadb-dev-compat{{ package_arch_prefix }}
+
+RUN apt-get update \
+    && apt-get install -y \
+    --no-install-recommends \
+    gcc-{{ package_cross_compiler }} \
+    && mkdir -p ~/.cargo \
+    && echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
+    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
+    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
+
+ENV CARGO_HOME "/root/.cargo"
+ENV USER "root"
+{% endif -%}
+
+{% if "amd64" in target_file and "alpine" not in target_file %}
+# Install DB packages
+RUN apt-get update && apt-get install -y \
+    --no-install-recommends \
+    libmariadb-dev{{ package_arch_prefix }} \
+    libpq-dev{{ package_arch_prefix }} \
+    && rm -rf /var/lib/apt/lists/*
+{% endif %}
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+{% if "alpine" not in target_file %}
+{% if "arm" in target_file %}
+# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
+# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
+# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
+# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
+# What we can do is a force install, because nothing important is overlapping each other.
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
+    apt-get download libmariadb-dev-compat:amd64 && \
+    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
+    rm -rvf ./libmariadb-dev-compat*.deb
+
+# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+# Without this specific file the ld command will fail and compilation fails with it.
+RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
+
+ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
+ENV CROSS_COMPILE="1"
+ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
+ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
+{% endif -%}
+{% endif %}
+{% if package_arch_target is defined %}
+RUN rustup target add {{ package_arch_target }}
+{% endif %}
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
+RUN find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
+{% if "alpine" in target_file %}
+{% if "armv7" in target_file %}
+RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
+{% endif %}
+{% endif %}
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM {{ runtime_stage_base_image }}
+
+ENV ROCKET_ENV "staging"
+ENV ROCKET_PORT=80
+ENV ROCKET_WORKERS=10
+{% if "alpine" in runtime_stage_base_image %}
+ENV SSL_CERT_DIR=/etc/ssl/certs
+{% endif %}
+
+{% if "amd64" not in target_file %}
+RUN [ "cross-build-start" ]
+
+{% endif %}
+# Install needed libraries
+{% if "alpine" in runtime_stage_base_image %}
+RUN apk add --no-cache \
+    openssl \
+    curl \
+    dumb-init \
+{% if "mysql" in features %}
+    mariadb-connector-c \
+{% endif %}
+{% if "postgresql" in features %}
+    postgresql-libs \
+{% endif %}
+    ca-certificates
+{% else %}
+RUN apt-get update && apt-get install -y \
+    --no-install-recommends \
+    openssl \
+    ca-certificates \
+    curl \
+    dumb-init \
+    libmariadb-dev-compat \
+    libpq5 \
+    && rm -rf /var/lib/apt/lists/*
+{% endif %}
+
+RUN mkdir /data
+{% if "amd64" not in target_file %}
+
+RUN [ "cross-build-end" ]
+
+{% endif %}
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY Rocket.toml .
+COPY --from=vault /web-vault ./web-vault
+{% if package_arch_target is defined %}
+COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
+{% else %}
+COPY --from=build /app/target/release/vaultwarden .
+{% endif %}
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
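Each concrete Dockerfile is produced by substituting a `target_file` value into this template. Rendering a single variant by hand would follow the same pattern as the `docker/Makefile` rule added just below, assuming the `render_template` helper it invokes is available in `docker/`:

```sh
# From the docker/ directory: set the Jinja2 `target_file` variable the
# template branches on, and write the rendered Dockerfile in place.
./render_template Dockerfile.j2 '{"target_file":"amd64/Dockerfile"}' > amd64/Dockerfile
```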
docker/Makefile (new file)
@@ -0,0 +1,9 @@
+OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
+
+all: $(OBJECTS)
+
+%/Dockerfile: Dockerfile.j2 render_template
+	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
+
+%/Dockerfile.alpine: Dockerfile.j2 render_template
+	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
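With that Makefile in place, regenerating every arch-specific Dockerfile after a template edit is a single command:

```sh
# Rebuilds each Dockerfile/Dockerfile.alpine found two directory levels deep.
cd docker && make
```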
docker/README.md (new file)
@@ -0,0 +1,3 @@
+The arch-specific directory names follow the arch identifiers used by the Docker official images:
+
+https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64
@@ -1,110 +0,0 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.11 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-RUN apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    gcc-aarch64-linux-gnu \
-    && mkdir -p ~/.cargo \
-    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-WORKDIR /app
-
-# Prepare openssl arm64 libs
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture arm64 \
-    && apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    libssl-dev:arm64 \
-    libc6-dev:arm64 \
-    libmariadb-dev:arm64
-
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Build
-RUN rustup target add aarch64-unknown-linux-gnu
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM balenalib/aarch64-debian:buster
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-RUN [ "cross-build-start" ]
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    libmariadbclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-
-RUN [ "cross-build-end" ]
-
-VOLUME /data
-EXPOSE 80
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh ./healthcheck.sh
-
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
-
-# Configures the startup!
-WORKDIR /
-CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.11 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set sqlite as default for DB ARG for backward compatibility
-ARG DB=sqlite
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-RUN apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    gcc-aarch64-linux-gnu \
-    && mkdir -p ~/.cargo \
-    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-WORKDIR /app
-
-# Prepare openssl arm64 libs
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture arm64 \
-    && apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    libssl-dev:arm64 \
-    libc6-dev:arm64
-
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Build
-RUN rustup target add aarch64-unknown-linux-gnu
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM balenalib/aarch64-debian:buster
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-RUN [ "cross-build-start" ]
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    sqlite3 \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-
-RUN [ "cross-build-end" ]
-
-VOLUME /data
-EXPOSE 80
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh ./healthcheck.sh
-
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
-
-# Configures the startup!
-WORKDIR /
-CMD ["/bitwarden_rs"]
@@ -1,50 +1,50 @@
-# Using multistage build:
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.11 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.19.0d
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
+#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
+#     [vaultwarden/web-vault:v2.19.0d]
+#
+FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault
 
 ########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
+FROM rust:1.51 as build
 
-# set mysql backend
-ARG DB=postgresql
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 
 # Don't download rust docs
 RUN rustup set profile minimal
 
-# Using bundled SQLite, no need to install it
-# RUN apt-get update && apt-get install -y\
-#     --no-install-recommends \
-#     sqlite3\
-#     && rm -rf /var/lib/apt/lists/*
-
-# Install MySQL package
+# Install DB packages
 RUN apt-get update && apt-get install -y \
     --no-install-recommends \
+    libmariadb-dev \
     libpq-dev \
     && rm -rf /var/lib/apt/lists/*
 
 # Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin app
+RUN USER=root cargo new --bin /app
 WORKDIR /app
 
 # Copies over *only* your manifests and build files
@@ -52,6 +52,7 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -84,7 +85,8 @@ RUN apt-get update && apt-get install -y \
     openssl \
    ca-certificates \
    curl \
-    sqlite3 \
+    dumb-init \
+    libmariadb-dev-compat \
     libpq5 \
    && rm -rf /var/lib/apt/lists/*
 
@@ -95,14 +97,16 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
+WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/vaultwarden .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
-CMD ["/bitwarden_rs"]
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
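The digest-pinning comments above double as a verification recipe; for example, to confirm the pinned digest still maps back to the expected tag:

```sh
# Should print a tag list containing vaultwarden/web-vault:v2.19.0d.
docker image inspect --format "{{.RepoTags}}" \
  vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
```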
docker/amd64/Dockerfile.alpine (new file)
@@ -0,0 +1,107 @@
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
+# https://docs.docker.com/develop/develop-images/multistage-build/
+# https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.19.0d
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
+#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
+#     [vaultwarden/web-vault:v2.19.0d]
+#
+FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault
+
+########################## BUILD IMAGE ##########################
+FROM clux/muslrust:nightly-2021-04-14 as build
+
+# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
+ARG DB=sqlite,postgresql
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+
+# Don't download rust docs
+RUN rustup set profile minimal
+
+ENV USER "root"
+ENV RUSTFLAGS='-C link-arg=-s'
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+RUN rustup target add x86_64-unknown-linux-musl
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
+RUN find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM alpine:3.13
+
+ENV ROCKET_ENV "staging"
+ENV ROCKET_PORT=80
+ENV ROCKET_WORKERS=10
+ENV SSL_CERT_DIR=/etc/ssl/certs
+
+# Install needed libraries
+RUN apk add --no-cache \
+    openssl \
+    curl \
+    dumb-init \
+    postgresql-libs \
+    ca-certificates
+
+RUN mkdir /data
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY Rocket.toml .
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
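Like the other variants, this file expects the repository root as the build context, since it copies `Cargo.*`, `docker/healthcheck.sh`, and `docker/start.sh` from there; a local build might look like this, with an illustrative tag:

```sh
# Build the statically linked musl/Alpine image from the repo root.
docker build -f docker/amd64/Dockerfile.alpine -t vaultwarden/server:alpine .
```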
@@ -1,101 +0,0 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.11 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-# Install MySQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmariadb-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM debian:buster-slim
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    libmariadbclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh ./healthcheck.sh
-
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
-
-# Configures the startup!
-WORKDIR /
-CMD ["/bitwarden_rs"]
@@ -1,89 +0,0 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.11 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
-
-########################## BUILD IMAGE ##########################
-# Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2019-12-19 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-ENV USER "root"
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmysqlclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-WORKDIR /app
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-RUN rustup target add x86_64-unknown-linux-musl
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Build
-RUN cargo build --features ${DB} --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM alpine:3.11
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs
-
-# Install needed libraries
-RUN apk add --no-cache \
-    openssl \
-    mariadb-connector-c \
-    curl \
-    ca-certificates
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh ./healthcheck.sh
-
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
-
-# Configures the startup!
-WORKDIR /
-CMD ["/bitwarden_rs"]
@@ -1,90 +0,0 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.11 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
-
-########################## BUILD IMAGE ##########################
-# Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2019-12-19 as build
-
-# set postgresql backend
-ARG DB=postgresql
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-ENV USER "root"
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libpq-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-WORKDIR /app
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-RUN rustup target add x86_64-unknown-linux-musl
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Build
-RUN cargo build --features ${DB} --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM alpine:3.11
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs
-
-# Install needed libraries
-RUN apk add --no-cache \
-    openssl \
-    postgresql-libs \
-    curl \
-    sqlite \
-    ca-certificates
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh ./healthcheck.sh
-
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
-
-# Configures the startup!
-WORKDIR /
-CMD ["/bitwarden_rs"]
@@ -1,95 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,84 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-12-19 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    sqlite \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
docker/arm64/Dockerfile (new file, 158 lines)
@@ -0,0 +1,158 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.19.0d
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
#     [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libpq5:arm64 \
        libpq-dev \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
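The file above can also be exercised outside the Docker Hub hooks. A minimal sketch, assuming a host where balenalib's QEMU shim lets the `cross-build-start` steps run, and using an illustrative local tag name:

    $ docker build -f docker/arm64/Dockerfile -t vaultwarden:arm64-local .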
docker/armv6/Dockerfile (new file, 158 lines)
@@ -0,0 +1,158 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.19.0d
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
#     [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
RUN rustup target add arm-unknown-linux-gnueabi

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,110 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
docker/armv7/Dockerfile (new file, 158 lines)
@@ -0,0 +1,158 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.19.0d
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
#     [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
docker/armv7/Dockerfile.alpine (new file, 113 lines)
@@ -0,0 +1,113 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.19.0d
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
#     [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build

# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    dumb-init \
    ca-certificates

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,111 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libmariadb-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
docker/healthcheck.sh (mode changed: Normal file → Executable file, 59 lines)
@@ -1,8 +1,53 @@
-#!/usr/bin/env sh
-
-if [ -z "$ROCKET_TLS"]
-then
-curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
-else
-curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
-fi
+#!/bin/sh
+
+# Use the value of the corresponding env var (if present),
+# or a default value otherwise.
+: ${DATA_FOLDER:="data"}
+: ${ROCKET_PORT:="80"}
+
+CONFIG_FILE="${DATA_FOLDER}"/config.json
+
+# Given a config key, return the corresponding config value from the
+# config file. If the key doesn't exist, return an empty string.
+get_config_val() {
+    local key="$1"
+    # Extract a line of the form:
+    #     "domain": "https://bw.example.com/path",
+    grep "\"${key}\":" "${CONFIG_FILE}" |
+    # To extract just the value (https://bw.example.com/path), delete:
+    # (1) everything up to and including the first ':',
+    # (2) whitespace and '"' from the front,
+    # (3) ',' and '"' from the back.
+    sed -e 's/[^:]\+://' -e 's/^[ "]\+//' -e 's/[,"]\+$//'
+}
+
+# Extract the base path from a domain URL. For example:
+# - `` -> ``
+# - `https://bw.example.com` -> ``
+# - `https://bw.example.com/` -> ``
+# - `https://bw.example.com/path` -> `/path`
+# - `https://bw.example.com/multi/path` -> `/multi/path`
+get_base_path() {
+    echo "$1" |
+    # Delete:
+    # (1) everything up to and including '://',
+    # (2) everything up to '/',
+    # (3) trailing '/' from the back.
+    sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||'
+}
+
+# Read domain URL from config.json, if present.
+if [ -r "${CONFIG_FILE}" ]; then
+    domain="$(get_config_val 'domain')"
+    if [ -n "${domain}" ]; then
+        # config.json 'domain' overrides the DOMAIN env var.
+        DOMAIN="${domain}"
+    fi
+fi
+
+base_path="$(get_base_path "${DOMAIN}")"
+if [ -n "${ROCKET_TLS}" ]; then
+    s='s'
+fi
+curl --insecure --fail --silent --show-error \
+     "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1
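Compared to the old 8-line probe, the new script derives scheme, port, and base path from the environment and `config.json`. A quick manual check with hypothetical values (no `config.json` present, so `DOMAIN` comes from the environment; plain `http` is used because `ROCKET_TLS` is unset):

    $ DOMAIN="https://bw.example.com/path" ROCKET_PORT=8080 sh docker/healthcheck.sh
    $ echo $?    # 0 if http://localhost:8080/path/alive answered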
docker/render_template (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/usr/bin/env python3

import os, argparse, json

import jinja2

args_parser = argparse.ArgumentParser()
args_parser.add_argument('template_file', help='Jinja2 template file to render.')
args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
cli_args = args_parser.parse_args()

render_vars = json.loads(cli_args.render_vars)
environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.getcwd()),
    trim_blocks=True,
)
print(environment.get_template(cli_args.template_file).render(render_vars))
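A hypothetical invocation of this script; the real template variable names are defined by `Dockerfile.j2` and the Makefile that drives it, neither of which appears in this diff, so the JSON key here is purely illustrative:

    $ ./docker/render_template docker/Dockerfile.j2 '{"target_file": "docker/arm64/Dockerfile"}' \
          > docker/arm64/Dockerfile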
25
docker/start.sh
Executable file
25
docker/start.sh
Executable file
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+if [ -r /etc/vaultwarden.sh ]; then
+    . /etc/vaultwarden.sh
+elif [ -r /etc/bitwarden_rs.sh ]; then
+    echo "### You are using the old /etc/bitwarden_rs.sh script, please migrate to /etc/vaultwarden.sh ###"
+    . /etc/bitwarden_rs.sh
+fi
+
+if [ -d /etc/vaultwarden.d ]; then
+    for f in /etc/vaultwarden.d/*.sh; do
+        if [ -r $f ]; then
+            . $f
+        fi
+    done
+elif [ -d /etc/bitwarden_rs.d ]; then
+    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
+    for f in /etc/bitwarden_rs.d/*.sh; do
+        if [ -r $f ]; then
+            . $f
+        fi
+    done
+fi
+
+exec /vaultwarden "${@}"
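Because the entrypoint sources every readable `*.sh` under `/etc/vaultwarden.d` before exec'ing the server, a bind-mounted snippet is enough to inject extra environment setup; a sketch (file name and values are examples):

    # /etc/vaultwarden.d/10-smtp.sh -- sourced by docker/start.sh at container start
    export SMTP_HOST=mail.example.com
    export SMTP_PORT=587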
hooks/README.md (20 lines; new file)
@@ -0,0 +1,20 @@
+The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.
+
+Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):
+
+* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
+* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
+* `COMMIT_MSG`: the message from the commit being tested and built.
+* `DOCKER_REPO`: the name of the Docker repository being built.
+* `DOCKERFILE_PATH`: the dockerfile currently being built.
+* `DOCKER_TAG`: the Docker repository tag being built.
+* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)
+
+The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
+
+## References
+
+* https://docs.docker.com/docker-hub/builds/advanced/
+* https://docs.docker.com/engine/reference/commandline/manifest/
+* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
+* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
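Outside Docker Hub, the build hook can be exercised by exporting those variables by hand; a minimal local dry run might look like this (repo and tag are example values, and `hooks/push` is deliberately left out since it pushes for real):

    export DOCKER_REPO=index.docker.io/example/vaultwarden
    export DOCKER_TAG=testing
    ./hooks/build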
hooks/arches.sh (16 lines; new file)
@@ -0,0 +1,16 @@
+# The default Debian-based images support these arches for all database backends.
+arches=(
+    amd64
+    armv6
+    armv7
+    arm64
+)
+
+if [[ "${DOCKER_TAG}" == *alpine ]]; then
+    # The Alpine image build currently only works for certain arches.
+    distro_suffix=.alpine
+    arches=(
+        amd64
+        armv7
+    )
+fi
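Since `arches.sh` is sourced rather than executed, the `arches` array and `distro_suffix` land directly in the caller's shell; the tag-dependent selection can be checked in isolation (a sketch):

    $ DOCKER_TAG=1.21.0-alpine bash -c 'source hooks/arches.sh; echo "${arches[@]}" "${distro_suffix}"'
    amd64 armv7 .alpine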
hooks/build (45 lines; new Executable file)
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+echo ">>> Building images..."
+
+source ./hooks/arches.sh
+
+if [[ -z "${SOURCE_COMMIT}" ]]; then
+    # This var is typically predefined by Docker Hub, but it won't be
+    # when testing locally.
+    SOURCE_COMMIT="$(git rev-parse HEAD)"
+fi
+
+# Construct a version string in the style of `build.rs`.
+GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
+if [[ -n "${GIT_EXACT_TAG}" ]]; then
+    SOURCE_VERSION="${GIT_EXACT_TAG}"
+else
+    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
+fi
+
+LABELS=(
+    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
+    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
+    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
+    org.opencontainers.image.licenses="GPL-3.0-only"
+    org.opencontainers.image.revision="${SOURCE_COMMIT}"
+    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
+    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
+    org.opencontainers.image.version="${SOURCE_VERSION}"
+)
+LABEL_ARGS=()
+for label in "${LABELS[@]}"; do
+    LABEL_ARGS+=(--label "${label}")
+done
+
+set -ex
+
+for arch in "${arches[@]}"; do
+    docker build \
+        "${LABEL_ARGS[@]}" \
+        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
+        -f docker/${arch}/Dockerfile${distro_suffix} \
+        .
+done
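The `build.rs`-style version string resolves to the bare tag when HEAD sits exactly on a tag, and `<last-tag>-<short-sha>` otherwise; for example (values are illustrative):

    $ git describe --tags --abbrev=0 --exact-match   # on a tagged commit: 1.21.0
    $ echo "${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"    # otherwise: 1.21.0-3da44a8d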
hooks/pre_build (28 lines; new Executable file)
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -ex
+
+# If requested, print some environment info for troubleshooting.
+if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
+    id
+    pwd
+    df -h
+    env
+    docker info
+    docker version
+fi
+
+# Install build dependencies.
+deps=(
+    jq
+)
+apt-get update
+apt-get install -y "${deps[@]}"
+
+# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
+# Git operations that we perform later, so fetch the complete history and
+# tags first. Note that if the build is cached, the clone may have been
+# unshallowed already; if so, unshallowing will fail, so skip it.
+if [[ -f .git/shallow ]]; then
+    git fetch --unshallow --tags
+fi
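The `.git/shallow` file test is the classic shallow-clone check; newer Git can be asked directly, which would be an equivalent guard (assuming Git 2.15 or later):

    if [[ "$(git rev-parse --is-shallow-repository)" == "true" ]]; then
        git fetch --unshallow --tags
    fi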
hooks/push (138 lines; new Executable file)
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+source ./hooks/arches.sh
+
+export DOCKER_CLI_EXPERIMENTAL=enabled
+
+# Join a list of args with a single char.
+# Ref: https://stackoverflow.com/a/17841619
+join() { local IFS="$1"; shift; echo "$*"; }
+
+set -ex
+
+echo ">>> Starting local Docker registry..."
+
+# Docker Buildx's `docker-container` driver is needed for multi-platform
+# builds, but it can't access existing images on the Docker host (like the
+# cross-compiled ones we just built). Those images first need to be pushed to
+# a registry -- Docker Hub could be used, but since it's not trivial to clean
+# up those intermediate images on Docker Hub, it's easier to just run a local
+# Docker registry, which gets cleaned up automatically once the build job ends.
+#
+# https://docs.docker.com/registry/deploying/
+# https://hub.docker.com/_/registry
+#
+# Use host networking so the buildx container can access the registry via
+# localhost.
+#
+docker run -d --name registry --network host registry:2  # defaults to port 5000
+
+# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
+# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
+LOCAL_REGISTRY="localhost:5000"
+REPO="${DOCKER_REPO#*/}"
+LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
+
+echo ">>> Pushing images to local registry..."
+
+for arch in ${arches[@]}; do
+    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
+    docker tag "${docker_image}" "${local_image}"
+    docker push "${local_image}"
+done
+
+echo ">>> Setting up Docker Buildx..."
+
+# Same as earlier, use host networking so the buildx container can access the
+# registry via localhost.
+#
+# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
+#
+docker buildx create --name builder --use --driver-opt network=host
+
+echo ">>> Running Docker Buildx..."
+
+tags=("${DOCKER_REPO}:${DOCKER_TAG}")
+
+# If the Docker tag starts with a version number, assume the latest release
+# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
+# to make it easier for users to track the latest release.
+if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
+    if [[ "${DOCKER_TAG}" == *alpine ]]; then
+        tags+=(${DOCKER_REPO}:alpine)
+    else
+        tags+=(${DOCKER_REPO}:latest)
+    fi
+fi
+
+tag_args=()
+for tag in "${tags[@]}"; do
+    tag_args+=(--tag "${tag}")
+done
+
+# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
+# the arch list to a platform list (assuming the OS is always `linux`).
+declare -A arch_to_platform=(
+    [amd64]="linux/amd64"
+    [armv6]="linux/arm/v6"
+    [armv7]="linux/arm/v7"
+    [arm64]="linux/arm64"
+)
+platforms=()
+for arch in ${arches[@]}; do
+    platforms+=("${arch_to_platform[$arch]}")
+done
+platforms="$(join "," "${platforms[@]}")"
+
+# Run the build, pushing the resulting images and multi-arch manifest list to
+# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
+# context, which isn't needed here since the actual cross-compiled images
+# have already been built.
+docker buildx build \
+    --network host \
+    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
+    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
+    --platform "${platforms}" \
+    "${tag_args[@]}" \
+    --push \
+    - < ./docker/Dockerfile.buildx
+
+# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
+# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
+# (https://github.com/moby/moby/issues/41017).
+#
+# Note that we use `arm32v6` instead of `armv6` to be consistent with the
+# existing vaultwarden tags, which adhere to the naming conventions of the
+# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
+# Unfortunately, these per-arch repo names aren't always consistent with the
+# corresponding platform (OS/arch/variant) IDs, particularly in the case of
+# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
+#
+# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
+# so this step can be removed once fixed versions are in wider distribution.
+#
+# Tags:
+#
+# testing        => testing-arm32v6
+# testing-alpine => <ignored>
+# x.y.z          => x.y.z-arm32v6, latest-arm32v6
+# x.y.z-alpine   => <ignored>
+#
+if [[ "${DOCKER_TAG}" != *alpine ]]; then
+    image="${DOCKER_REPO}":"${DOCKER_TAG}"
+
+    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
+    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
+    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"
+
+    # Pull the armv6 image by digest, retag it, and repush it.
+    docker pull "${DOCKER_REPO}"@"${digest}"
+    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
+    docker push "${image}"-arm32v6
+
+    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
+        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
+        docker push "${DOCKER_REPO}:latest"-arm32v6
+    fi
+fi
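The jq filter near the end walks the manifest list returned by `docker manifest inspect` and picks out the ARMv6 entry; against a published multi-arch image it behaves roughly like this (image name is an example, digest truncated):

    $ docker manifest inspect example/vaultwarden:1.21.0 \
        | jq -r '.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    sha256:0123abcd...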
@@ -0,0 +1 @@
+DROP TABLE org_policies;
@@ -0,0 +1,9 @@
+CREATE TABLE org_policies (
+    uuid     CHAR(36) NOT NULL PRIMARY KEY,
+    org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
+    atype    INTEGER  NOT NULL,
+    enabled  BOOLEAN  NOT NULL,
+    data     TEXT     NOT NULL,
+
+    UNIQUE (org_uuid, atype)
+);
@@ -0,0 +1 @@
+
@@ -0,0 +1,3 @@
+ALTER TABLE ciphers
+ADD COLUMN
+    deleted_at DATETIME;
@@ -0,0 +1,2 @@
+ALTER TABLE users_collections
+ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,13 @@
+ALTER TABLE ciphers
+ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- Transfer favorite status for user-owned ciphers.
+UPDATE ciphers
+SET favorite = TRUE
+WHERE EXISTS (
+    SELECT * FROM favorites
+    WHERE favorites.user_uuid = ciphers.user_uuid
+      AND favorites.cipher_uuid = ciphers.uuid
+);
+
+DROP TABLE favorites;
@@ -0,0 +1,16 @@
+CREATE TABLE favorites (
+    user_uuid   CHAR(36) NOT NULL REFERENCES users(uuid),
+    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid),
+
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
+
+-- Transfer favorite status for user-owned ciphers.
+INSERT INTO favorites(user_uuid, cipher_uuid)
+SELECT user_uuid, uuid
+FROM ciphers
+WHERE favorite = TRUE
+  AND user_uuid IS NOT NULL;
+
+ALTER TABLE ciphers
+DROP COLUMN favorite;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

migrations/mysql/2021-03-11-190243_add_sends/down.sql (1 line; new file)
@@ -0,0 +1 @@
+DROP TABLE sends;

migrations/mysql/2021-03-11-190243_add_sends/up.sql (25 lines; new file)
@@ -0,0 +1,25 @@
+CREATE TABLE sends (
+    uuid              CHAR(36) NOT NULL PRIMARY KEY,
+    user_uuid         CHAR(36) REFERENCES users (uuid),
+    organization_uuid CHAR(36) REFERENCES organizations (uuid),
+
+    name              TEXT     NOT NULL,
+    notes             TEXT,
+
+    atype             INTEGER  NOT NULL,
+    data              TEXT     NOT NULL,
+    akey              TEXT     NOT NULL,
+    password_hash     BLOB,
+    password_salt     BLOB,
+    password_iter     INTEGER,
+
+    max_access_count  INTEGER,
+    access_count      INTEGER  NOT NULL,
+
+    creation_date     DATETIME NOT NULL,
+    revision_date     DATETIME NOT NULL,
+    expiration_date   DATETIME,
+    deletion_date     DATETIME NOT NULL,
+
+    disabled          BOOLEAN  NOT NULL
+);
@@ -0,0 +1 @@
+DROP TABLE org_policies;
@@ -0,0 +1,9 @@
+CREATE TABLE org_policies (
+    uuid     CHAR(36) NOT NULL PRIMARY KEY,
+    org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
+    atype    INTEGER  NOT NULL,
+    enabled  BOOLEAN  NOT NULL,
+    data     TEXT     NOT NULL,
+
+    UNIQUE (org_uuid, atype)
+);
@@ -0,0 +1 @@
+
@@ -0,0 +1,3 @@
+ALTER TABLE ciphers
+ADD COLUMN
+    deleted_at TIMESTAMP;
@@ -0,0 +1,2 @@
+ALTER TABLE users_collections
+ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,13 @@
+ALTER TABLE ciphers
+ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- Transfer favorite status for user-owned ciphers.
+UPDATE ciphers
+SET favorite = TRUE
+WHERE EXISTS (
+    SELECT * FROM favorites
+    WHERE favorites.user_uuid = ciphers.user_uuid
+      AND favorites.cipher_uuid = ciphers.uuid
+);
+
+DROP TABLE favorites;
@@ -0,0 +1,16 @@
+CREATE TABLE favorites (
+    user_uuid   VARCHAR(40) NOT NULL REFERENCES users(uuid),
+    cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),
+
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
+
+-- Transfer favorite status for user-owned ciphers.
+INSERT INTO favorites(user_uuid, cipher_uuid)
+SELECT user_uuid, uuid
+FROM ciphers
+WHERE favorite = TRUE
+  AND user_uuid IS NOT NULL;
+
+ALTER TABLE ciphers
+DROP COLUMN favorite;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -0,0 +1 @@
+DROP TABLE sends;

migrations/postgresql/2021-03-11-190243_add_sends/up.sql (25 lines; new file)
@@ -0,0 +1,25 @@
+CREATE TABLE sends (
+    uuid              CHAR(36) NOT NULL PRIMARY KEY,
+    user_uuid         CHAR(36) REFERENCES users (uuid),
+    organization_uuid CHAR(36) REFERENCES organizations (uuid),
+
+    name              TEXT      NOT NULL,
+    notes             TEXT,
+
+    atype             INTEGER   NOT NULL,
+    data              TEXT      NOT NULL,
+    key               TEXT      NOT NULL,
+    password_hash     BYTEA,
+    password_salt     BYTEA,
+    password_iter     INTEGER,
+
+    max_access_count  INTEGER,
+    access_count      INTEGER   NOT NULL,
+
+    creation_date     TIMESTAMP NOT NULL,
+    revision_date     TIMESTAMP NOT NULL,
+    expiration_date   TIMESTAMP,
+    deletion_date     TIMESTAMP NOT NULL,
+
+    disabled          BOOLEAN   NOT NULL
+);
@@ -0,0 +1 @@
+ALTER TABLE sends RENAME COLUMN key TO akey;
@@ -0,0 +1 @@
+DROP TABLE org_policies;
@@ -0,0 +1,9 @@
+CREATE TABLE org_policies (
+    uuid     TEXT    NOT NULL PRIMARY KEY,
+    org_uuid TEXT    NOT NULL REFERENCES organizations (uuid),
+    atype    INTEGER NOT NULL,
+    enabled  BOOLEAN NOT NULL,
+    data     TEXT    NOT NULL,
+
+    UNIQUE (org_uuid, atype)
+);
@@ -0,0 +1 @@
+
@@ -0,0 +1,3 @@
+ALTER TABLE ciphers
+ADD COLUMN
+    deleted_at DATETIME;
@@ -0,0 +1,2 @@
+ALTER TABLE users_collections
+ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT 0; -- FALSE
@@ -0,0 +1,13 @@
+ALTER TABLE ciphers
+ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT 0; -- FALSE
+
+-- Transfer favorite status for user-owned ciphers.
+UPDATE ciphers
+SET favorite = 1
+WHERE EXISTS (
+    SELECT * FROM favorites
+    WHERE favorites.user_uuid = ciphers.user_uuid
+      AND favorites.cipher_uuid = ciphers.uuid
+);
+
+DROP TABLE favorites;
@@ -0,0 +1,71 @@
+CREATE TABLE favorites (
+    user_uuid   TEXT NOT NULL REFERENCES users(uuid),
+    cipher_uuid TEXT NOT NULL REFERENCES ciphers(uuid),
+
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
+
+-- Transfer favorite status for user-owned ciphers.
+INSERT INTO favorites(user_uuid, cipher_uuid)
+SELECT user_uuid, uuid
+FROM ciphers
+WHERE favorite = 1
+  AND user_uuid IS NOT NULL;
+
+-- Drop the `favorite` column from the `ciphers` table, using the 12-step
+-- procedure from <https://www.sqlite.org/lang_altertable.html#altertabrename>.
+-- Note that some steps aren't applicable and are omitted.
+
+-- 1. If foreign key constraints are enabled, disable them using PRAGMA foreign_keys=OFF.
+--
+-- Diesel runs each migration in its own transaction. `PRAGMA foreign_keys`
+-- is a no-op within a transaction, so this step must be done outside of this
+-- file, before starting the Diesel migrations.
+
+-- 2. Start a transaction.
+--
+-- Diesel already runs each migration in its own transaction.
+
+-- 4. Use CREATE TABLE to construct a new table "new_X" that is in the
+-- desired revised format of table X. Make sure that the name "new_X" does
+-- not collide with any existing table name, of course.
+
+CREATE TABLE new_ciphers(
+    uuid              TEXT     NOT NULL PRIMARY KEY,
+    created_at        DATETIME NOT NULL,
+    updated_at        DATETIME NOT NULL,
+    user_uuid         TEXT     REFERENCES users(uuid),
+    organization_uuid TEXT     REFERENCES organizations(uuid),
+    atype             INTEGER  NOT NULL,
+    name              TEXT     NOT NULL,
+    notes             TEXT,
+    fields            TEXT,
+    data              TEXT     NOT NULL,
+    password_history  TEXT,
+    deleted_at        DATETIME
+);
+
+-- 5. Transfer content from X into new_X using a statement like:
+--    INSERT INTO new_X SELECT ... FROM X.
+
+INSERT INTO new_ciphers(uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
+                        name, notes, fields, data, password_history, deleted_at)
+SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
+       name, notes, fields, data, password_history, deleted_at
+FROM ciphers;
+
+-- 6. Drop the old table X: DROP TABLE X.
+
+DROP TABLE ciphers;
+
+-- 7. Change the name of new_X to X using: ALTER TABLE new_X RENAME TO X.
+
+ALTER TABLE new_ciphers RENAME TO ciphers;
+
+-- 11. Commit the transaction started in step 2.
+
+-- 12. If foreign keys constraints were originally enabled, reenable them now.
+--
+-- `PRAGMA foreign_keys` is scoped to a database connection, and Diesel
+-- migrations are run in a separate database connection that is closed once
+-- the migrations finish.
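The same rebuild pattern can be tried standalone with the sqlite3 CLI; a toy sketch dropping a column from a throwaway table (names are made up):

    sqlite3 /tmp/demo.db <<'SQL'
    CREATE TABLE t(a INTEGER, b TEXT);
    INSERT INTO t VALUES (1, 'x');
    -- Rebuild t without column b (steps 4-7 above).
    CREATE TABLE new_t(a INTEGER);
    INSERT INTO new_t(a) SELECT a FROM t;
    DROP TABLE t;
    ALTER TABLE new_t RENAME TO t;
    SQL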
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

migrations/sqlite/2021-03-11-190243_add_sends/down.sql (1 line; new file)
@@ -0,0 +1 @@
+DROP TABLE sends;

migrations/sqlite/2021-03-11-190243_add_sends/up.sql (25 lines; new file)
@@ -0,0 +1,25 @@
+CREATE TABLE sends (
+    uuid              TEXT     NOT NULL PRIMARY KEY,
+    user_uuid         TEXT     REFERENCES users (uuid),
+    organization_uuid TEXT     REFERENCES organizations (uuid),
+
+    name              TEXT     NOT NULL,
+    notes             TEXT,
+
+    atype             INTEGER  NOT NULL,
+    data              TEXT     NOT NULL,
+    key               TEXT     NOT NULL,
+    password_hash     BLOB,
+    password_salt     BLOB,
+    password_iter     INTEGER,
+
+    max_access_count  INTEGER,
+    access_count      INTEGER  NOT NULL,
+
+    creation_date     DATETIME NOT NULL,
+    revision_date     DATETIME NOT NULL,
+    expiration_date   DATETIME,
+    deletion_date     DATETIME NOT NULL,
+
+    disabled          BOOLEAN  NOT NULL
+);
@@ -0,0 +1 @@
+ALTER TABLE sends RENAME COLUMN key TO akey;
@@ -1 +1 @@
-nightly-2019-12-19
+nightly-2021-04-14
@@ -1,2 +1,7 @@
 version = "Two"
+edition = "2018"
 max_width = 120
+newline_style = "Unix"
+use_small_heuristics = "Off"
+struct_lit_single_line = false
+overflow_delimited_expr = true
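Several of these rustfmt options are unstable and only take effect with `version = "Two"`, which requires a nightly rustfmt, hence the toolchain bump above; formatting the tree with the pinned toolchain would look like this (assuming rustup):

    rustup toolchain install nightly-2021-04-14
    cargo +nightly-2021-04-14 fmt -- --check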
src/api/admin.rs (487 lines changed)
@@ -1,45 +1,70 @@
+use once_cell::sync::Lazy;
+use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::process::Command;
+use std::{env, time::Duration};
 
-use rocket::http::{Cookie, Cookies, SameSite};
-use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
-use rocket::response::{content::Html, Flash, Redirect};
-use rocket::{Outcome, Route};
+use rocket::{
+    http::{Cookie, Cookies, SameSite},
+    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
+    response::{content::Html, Flash, Redirect},
+    Route,
+};
 use rocket_contrib::json::Json;
 
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
-use crate::config::ConfigBuilder;
-use crate::db::{backup_database, models::*, DbConn};
-use crate::error::Error;
-use crate::mail;
-use crate::CONFIG;
+use crate::{
+    api::{ApiResult, EmptyResult, NumberOrString},
+    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
+    config::ConfigBuilder,
+    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
+    error::{Error, MapResult},
+    mail,
+    util::{format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker},
+    CONFIG,
+};
 
 pub fn routes() -> Vec<Route> {
-    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
+    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
         return routes![admin_disabled];
     }
 
     routes![
         admin_login,
-        get_users,
+        get_users_json,
         post_admin_login,
         admin_page,
         invite_user,
         logout,
         delete_user,
         deauth_user,
+        disable_user,
+        enable_user,
         remove_2fa,
+        update_user_org_type,
         update_revision_users,
         post_config,
         delete_config,
         backup_db,
+        test_smtp,
+        users_overview,
+        organizations_overview,
+        delete_organization,
+        diagnostics,
+        get_diagnostics_config
     ]
 }
 
-lazy_static! {
-    static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok();
-}
+static DB_TYPE: Lazy<&str> = Lazy::new(|| {
+    DbConnType::from_url(&CONFIG.database_url())
+        .map(|t| match t {
+            DbConnType::sqlite => "SQLite",
+            DbConnType::mysql => "MySQL",
+            DbConnType::postgresql => "PostgreSQL",
+        })
+        .unwrap_or("Unknown")
+});
+
+static CAN_BACKUP: Lazy<bool> =
+    Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));
 
 #[get("/")]
 fn admin_disabled() -> &'static str {
@@ -50,13 +75,74 @@ const COOKIE_NAME: &str = "BWRS_ADMIN";
 const ADMIN_PATH: &str = "/admin";
 
 const BASE_TEMPLATE: &str = "admin/base";
-const VERSION: Option<&str> = option_env!("GIT_VERSION");
+const VERSION: Option<&str> = option_env!("BWRS_VERSION");
+
+fn admin_path() -> String {
+    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
+}
+
+struct Referer(Option<String>);
+
+impl<'a, 'r> FromRequest<'a, 'r> for Referer {
+    type Error = ();
+
+    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
+    }
+}
+
+#[derive(Debug)]
+struct IpHeader(Option<String>);
+
+impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
+    type Error = ();
+
+    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        if req.headers().get_one(&CONFIG.ip_header()).is_some() {
+            Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
+        } else if req.headers().get_one("X-Client-IP").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
+        } else if req.headers().get_one("X-Real-IP").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
+        } else if req.headers().get_one("X-Forwarded-For").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
+        } else {
+            Outcome::Success(IpHeader(None))
+        }
+    }
+}
+
+/// Used for `Location` response headers, which must specify an absolute URI
+/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
+fn admin_url(referer: Referer) -> String {
+    // If we get a referer, use that to make it work when DOMAIN is not set
+    if let Some(mut referer) = referer.0 {
+        if let Some(start_index) = referer.find(ADMIN_PATH) {
+            referer.truncate(start_index + ADMIN_PATH.len());
+            return referer;
+        }
+    }
+
+    if CONFIG.domain_set() {
+        // Don't use CONFIG.domain() directly, since the user may want to keep a
+        // trailing slash there, particularly when running under a subpath.
+        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
+    } else {
+        // Last case, when no referer or domain is set; technically invalid, but better than nothing
+        ADMIN_PATH.to_string()
+    }
+}
 
 #[get("/", rank = 2)]
 fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
     // If there is an error, show it
     let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
-    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});
+    let json = json!({
+        "page_content": "admin/login",
+        "version": VERSION,
+        "error": msg,
+        "urlpath": CONFIG.domain_path()
+    });
 
     // Return the page
     let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
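The `IpHeader` guard above only reports which client-IP header is present; `ClientIp` is what actually resolves the address, based on the configured `IP_HEADER`. One way to watch that in action is a deliberately failed login with a spoofed header (a sketch; host, port, and token are example values):

    # The "Invalid admin token" log line should report the spoofed address
    # when vaultwarden sits behind a proxy that sets X-Real-IP.
    curl -d 'token=not-the-real-token' -H 'X-Real-IP: 203.0.113.7' \
         http://localhost:8000/admin/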
@@ -69,30 +155,32 @@ struct LoginForm {
 }
 
 #[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
+fn post_admin_login(
+    data: Form<LoginForm>,
+    mut cookies: Cookies,
+    ip: ClientIp,
+    referer: Referer,
+) -> Result<Redirect, Flash<Redirect>> {
     let data = data.into_inner();
 
     // If the token is invalid, redirect to login page
     if !_validate_token(&data.token) {
         error!("Invalid admin token. IP: {}", ip.ip);
-        Err(Flash::error(
-            Redirect::to(ADMIN_PATH),
-            "Invalid admin token, please try again.",
-        ))
+        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
     } else {
         // If the token received is valid, generate JWT and save it as a cookie
         let claims = generate_admin_claims();
         let jwt = encode_jwt(&claims);
 
         let cookie = Cookie::build(COOKIE_NAME, jwt)
-            .path(ADMIN_PATH)
-            .max_age(chrono::Duration::minutes(20))
+            .path(admin_path())
+            .max_age(time::Duration::minutes(20))
             .same_site(SameSite::Strict)
             .http_only(true)
             .finish();
 
         cookies.add(cookie);
-        Ok(Redirect::to(ADMIN_PATH))
+        Ok(Redirect::to(admin_url(referer)))
     }
 }
 
@@ -107,21 +195,69 @@ fn _validate_token(token: &str) -> bool {
 struct AdminTemplateData {
     page_content: String,
     version: Option<&'static str>,
-    users: Vec<Value>,
+    users: Option<Vec<Value>>,
+    organizations: Option<Vec<Value>>,
+    diagnostics: Option<Value>,
     config: Value,
     can_backup: bool,
     logged_in: bool,
+    urlpath: String,
 }
 
 impl AdminTemplateData {
-    fn new(users: Vec<Value>) -> Self {
+    fn new() -> Self {
         Self {
-            page_content: String::from("admin/page"),
+            page_content: String::from("admin/settings"),
             version: VERSION,
-            users,
             config: CONFIG.prepare_json(),
             can_backup: *CAN_BACKUP,
             logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            users: None,
+            organizations: None,
+            diagnostics: None,
+        }
+    }
+
+    fn users(users: Vec<Value>) -> Self {
+        Self {
+            page_content: String::from("admin/users"),
+            version: VERSION,
+            users: Some(users),
+            config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            organizations: None,
+            diagnostics: None,
+        }
+    }
+
+    fn organizations(organizations: Vec<Value>) -> Self {
+        Self {
+            page_content: String::from("admin/organizations"),
+            version: VERSION,
+            organizations: Some(organizations),
+            config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            users: None,
+            diagnostics: None,
+        }
+    }
+
+    fn diagnostics(diagnostics: Value) -> Self {
+        Self {
+            page_content: String::from("admin/diagnostics"),
+            version: VERSION,
+            organizations: None,
+            config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            users: None,
+            diagnostics: Some(diagnostics),
         }
     }
 }
 
@@ -131,11 +267,8 @@ impl AdminTemplateData {
 }
 
 #[get("/", rank = 1)]
-fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
-    let users = User::get_all(&conn);
-    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
-
-    let text = AdminTemplateData::new(users_json).render()?;
+fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
+    let text = AdminTemplateData::new().render()?;
     Ok(Html(text))
 }
 
@@ -153,76 +286,302 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Empt
         err!("User already exists")
     }
 
-    if !CONFIG.invitations_allowed() {
-        err!("Invitations are not allowed")
-    }
-
     let mut user = User::new(email);
     user.save(&conn)?;
 
     if CONFIG.mail_enabled() {
-        let org_name = "bitwarden_rs";
-        mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
+        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)
     } else {
         let invitation = Invitation::new(data.email);
         invitation.save(&conn)
     }
 }
 
+#[post("/test/smtp", data = "<data>")]
+fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
+    let data: InviteData = data.into_inner();
+
+    if CONFIG.mail_enabled() {
+        mail::send_test(&data.email)
+    } else {
+        err!("Mail is not enabled")
+    }
+}
+
 #[get("/logout")]
-fn logout(mut cookies: Cookies) -> Result<Redirect, ()> {
+fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
     cookies.remove(Cookie::named(COOKIE_NAME));
-    Ok(Redirect::to(ADMIN_PATH))
+    Redirect::to(admin_url(referer))
 }
 
 #[get("/users")]
-fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
+fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
     let users = User::get_all(&conn);
     let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
 
-    Ok(Json(Value::Array(users_json)))
+    Json(Value::Array(users_json))
+}
+
+#[get("/users/overview")]
+fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
+    let users = User::get_all(&conn);
+    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
+    let users_json: Vec<Value> = users
+        .iter()
+        .map(|u| {
+            let mut usr = u.to_json(&conn);
+            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
+            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
+            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
+            usr["user_enabled"] = json!(u.enabled);
+            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
+            usr["last_active"] = match u.last_active(&conn) {
+                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
+                None => json!("Never"),
+            };
+            usr
+        })
+        .collect();
+
+    let text = AdminTemplateData::users(users_json).render()?;
+    Ok(Html(text))
 }
 
 #[post("/users/<uuid>/delete")]
 fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let user = match User::find_by_uuid(&uuid, &conn) {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
-    };
-
+    let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
     user.delete(&conn)
 }
 
 #[post("/users/<uuid>/deauth")]
 fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = match User::find_by_uuid(&uuid, &conn) {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
-    };
-
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
     Device::delete_all_by_user(&user.uuid, &conn)?;
     user.reset_security_stamp();
 
     user.save(&conn)
 }
 
+#[post("/users/<uuid>/disable")]
+fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    Device::delete_all_by_user(&user.uuid, &conn)?;
+    user.reset_security_stamp();
+    user.enabled = false;
+
+    user.save(&conn)
+}
+
+#[post("/users/<uuid>/enable")]
+fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    user.enabled = true;
+
+    user.save(&conn)
+}
+
 #[post("/users/<uuid>/remove-2fa")]
 fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = match User::find_by_uuid(&uuid, &conn) {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
-    };
-
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
     TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
     user.totp_recover = None;
     user.save(&conn)
 }
 
+#[derive(Deserialize, Debug)]
+struct UserOrgTypeData {
+    user_type: NumberOrString,
+    user_uuid: String,
+    org_uuid: String,
+}
+
+#[post("/users/org_type", data = "<data>")]
+fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let data: UserOrgTypeData = data.into_inner();
+
+    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
+        Some(user) => user,
+        None => err!("The specified user isn't a member of the organization"),
+    };
+
+    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
+        Some(new_type) => new_type as i32,
+        None => err!("Invalid type"),
+    };
+
+    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
+        // Removing owner permission; check that at least one other owner remains
+        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
+
+        if num_owners <= 1 {
+            err!("Can't change the type of the last owner")
+        }
+    }
+
+    user_to_edit.atype = new_type as i32;
+    user_to_edit.save(&conn)
+}
+
 #[post("/users/update_revision")]
 fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
     User::update_all_revisions(&conn)
 }
 
+#[get("/organizations/overview")]
+fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
+    let organizations = Organization::get_all(&conn);
+    let organizations_json: Vec<Value> = organizations
+        .iter()
+        .map(|o| {
+            let mut org = o.to_json();
+            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
+            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
+            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
+            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
+            org
+        })
+        .collect();
+
+    let text = AdminTemplateData::organizations(organizations_json).render()?;
+    Ok(Html(text))
+}
+
+#[post("/organizations/<uuid>/delete")]
+fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
+    org.delete(&conn)
+}
+
+#[derive(Deserialize)]
+struct WebVaultVersion {
+    version: String,
+}
+
+#[derive(Deserialize)]
+struct GitRelease {
+    tag_name: String,
+}
+
+#[derive(Deserialize)]
+struct GitCommit {
+    sha: String,
+}
+
+fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
+    let github_api = get_reqwest_client();
+
+    Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
+}
+
+fn has_http_access() -> bool {
+    let http_access = get_reqwest_client();
+
+    match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
+        Ok(r) => r.status().is_success(),
+        _ => false,
+    }
+}
+
+#[get("/diagnostics")]
+fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
+    use crate::util::read_file_string;
+    use chrono::prelude::*;
+    use std::net::ToSocketAddrs;
+
+    // Get current running versions
+    let web_vault_version: WebVaultVersion =
+        match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
+            Ok(s) => serde_json::from_str(&s)?,
+            _ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
+                Ok(s) => serde_json::from_str(&s)?,
+                _ => WebVaultVersion {
+                    version: String::from("Version file missing"),
+                },
+            },
+        };
+
+    // Execute some environment checks
+    let running_within_docker = is_running_in_docker();
+    let has_http_access = has_http_access();
+    let uses_proxy = env::var_os("HTTP_PROXY").is_some()
+        || env::var_os("http_proxy").is_some()
+        || env::var_os("HTTPS_PROXY").is_some()
+        || env::var_os("https_proxy").is_some();
+
+    // Check if we are able to resolve DNS entries
+    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
+        Ok(Some(a)) => a.ip().to_string(),
+        _ => "Could not resolve domain name.".to_string(),
+    };
+
+    // If the HTTP check failed, do not even attempt to check for new versions, since we were not able to connect to github.com anyway.
+    // TODO: Maybe we need to cache this using a LazyStatic or something. GitHub only allows 60 requests per hour, and we use 3 here already.
+    let (latest_release, latest_commit, latest_web_build) = if has_http_access {
+        (
+            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
+                Ok(r) => r.tag_name,
+                _ => "-".to_string(),
+            },
+            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
+                Ok(mut c) => {
+                    c.sha.truncate(8);
+                    c.sha
+                }
+                _ => "-".to_string(),
+            },
+            // Do not fetch the web-vault version when running within Docker.
+            // The web-vault version is embedded within the container itself, and should not be updated manually.
+            if running_within_docker {
+                "-".to_string()
+            } else {
+                match get_github_api::<GitRelease>(
+                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
+                ) {
+                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
+                    _ => "-".to_string(),
+                }
+            },
+        )
+    } else {
+        ("-".to_string(), "-".to_string(), "-".to_string())
+    };
+
+    let ip_header_name = match &ip_header.0 {
+        Some(h) => h,
+        _ => "",
+    };
+
+    let diagnostics_json = json!({
+        "dns_resolved": dns_resolved,
+        "latest_release": latest_release,
+        "latest_commit": latest_commit,
+        "web_vault_enabled": &CONFIG.web_vault_enabled(),
+        "web_vault_version": web_vault_version.version,
+        "latest_web_build": latest_web_build,
+        "running_within_docker": running_within_docker,
+        "has_http_access": has_http_access,
+        "ip_header_exists": &ip_header.0.is_some(),
+        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_name": ip_header_name,
+        "ip_header_config": &CONFIG.ip_header(),
+        "uses_proxy": uses_proxy,
+        "db_type": *DB_TYPE,
+        "db_version": get_sql_server_version(&conn),
+        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
+        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
+        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
+    });
+
+    let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
+    Ok(Html(text))
+}
+
+#[get("/diagnostics/config")]
+fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
+    let support_json = CONFIG.get_support_json();
+    Json(support_json)
+}
+
 #[post("/config", data = "<data>")]
 fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
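The new admin routes are ordinary cookie-guarded endpoints, so they can be driven from the shell once a session cookie exists; a sketch (host, port, uuid, and token are example values):

    # Log in once to store the BWRS_ADMIN cookie, then hit the new endpoints.
    curl -c /tmp/admin.txt -d 'token=my-admin-token' http://localhost:8000/admin/
    curl -b /tmp/admin.txt -X POST http://localhost:8000/admin/users/<user-uuid>/disable
    curl -b /tmp/admin.txt -X POST http://localhost:8000/admin/users/<user-uuid>/enable
    curl -b /tmp/admin.txt http://localhost:8000/admin/diagnostics/config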
@@ -235,11 +594,11 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
 }
 
 #[post("/config/backup_db")]
-fn backup_db(_token: AdminToken) -> EmptyResult {
+fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
     if *CAN_BACKUP {
-        backup_database()
+        backup_database(&conn)
     } else {
-        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
+        err!("Can't back up current DB (Only SQLite supports this feature)");
     }
 }
 
@@ -1,19 +1,16 @@
|
|||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use rocket_contrib::json::Json;
|
use rocket_contrib::json::Json;
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::db::models::*;
|
use crate::{
|
||||||
use crate::db::DbConn;
|
api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
|
||||||
|
auth::{decode_delete, decode_invite, decode_verify_email, Headers},
|
||||||
|
crypto,
|
||||||
|
db::{models::*, DbConn},
|
||||||
|
mail, CONFIG,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
|
pub fn routes() -> Vec<rocket::Route> {
|
||||||
use crate::auth::{decode_delete, decode_invite, decode_verify_email, Headers};
|
|
||||||
use crate::crypto;
|
|
||||||
use crate::mail;
|
|
||||||
|
|
||||||
use crate::CONFIG;
|
|
||||||
|
|
||||||
use rocket::Route;
|
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
|
||||||
routes![
|
routes![
|
||||||
register,
|
register,
|
||||||
profile,
|
profile,
|
||||||
@@ -36,6 +33,7 @@ pub fn routes() -> Vec<Route> {
|
|||||||
revision_date,
|
revision_date,
|
||||||
password_hint,
|
password_hint,
|
||||||
prelogin,
|
prelogin,
|
||||||
|
verify_password,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -68,7 +66,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||||||
let mut user = match User::find_by_mail(&data.Email, &conn) {
|
let mut user = match User::find_by_mail(&data.Email, &conn) {
|
||||||
Some(user) => {
|
Some(user) => {
|
||||||
if !user.password_hash.is_empty() {
|
if !user.password_hash.is_empty() {
|
||||||
if CONFIG.signups_allowed() {
|
if CONFIG.is_signup_allowed(&data.Email) {
|
||||||
err!("User already exists")
|
err!("User already exists")
|
||||||
} else {
|
} else {
|
||||||
err!("Registration not allowed or user already exists")
|
err!("Registration not allowed or user already exists")
|
||||||
@@ -89,14 +87,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||||||
}
|
}
|
||||||
|
|
||||||
user
|
user
|
||||||
} else if CONFIG.signups_allowed() {
|
} else if CONFIG.is_signup_allowed(&data.Email) {
|
||||||
err!("Account with this email already exists")
|
err!("Account with this email already exists")
|
||||||
} else {
|
} else {
|
||||||
err!("Registration not allowed or user already exists")
|
err!("Registration not allowed or user already exists")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) || CONFIG.can_signup_user(&data.Email) {
|
// Order is important here; the invitation check must come first
|
||||||
|
// because the vaultwarden admin can invite anyone, regardless
|
||||||
|
// of other signup restrictions.
|
||||||
|
if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
|
||||||
User::new(data.Email.clone())
|
User::new(data.Email.clone())
|
||||||
} else {
|
} else {
|
||||||
err!("Registration not allowed or user already exists")
|
err!("Registration not allowed or user already exists")
|
||||||
@@ -115,7 +116,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         user.client_kdf_type = client_kdf_type;
     }

-    user.set_password(&data.MasterPasswordHash);
+    user.set_password(&data.MasterPasswordHash, None);
     user.akey = data.Key;

     // Add extra fields if present
@@ -139,10 +140,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         }

         user.last_verifying_at = Some(user.created_at);
-    } else {
-        if let Err(e) = mail::send_welcome(&user.email) {
-            error!("Error sending welcome email: {:#?}", e);
-        }
+    } else if let Err(e) = mail::send_welcome(&user.email) {
+        error!("Error sending welcome email: {:#?}", e);
     }
 }

@@ -150,8 +149,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
 }

 #[get("/accounts/profile")]
-fn profile(headers: Headers, conn: DbConn) -> JsonResult {
-    Ok(Json(headers.user.to_json(&conn)))
+fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
+    Json(headers.user.to_json(&conn))
 }

 #[derive(Deserialize, Debug)]
@@ -207,7 +206,12 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
     user.public_key = Some(data.PublicKey);

     user.save(&conn)?;
-    Ok(Json(user.to_json(&conn)))
+
+    Ok(Json(json!({
+        "PrivateKey": user.private_key,
+        "PublicKey": user.public_key,
+        "Object":"keys"
+    })))
 }

 #[derive(Deserialize)]
@@ -227,7 +231,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
         err!("Invalid password")
     }

-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
     user.akey = data.Key;
     user.save(&conn)
 }
@@ -254,7 +258,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->

     user.client_kdf_iter = data.KdfIterations;
     user.client_kdf_type = data.Kdf;
-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;
     user.save(&conn)
 }
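The second argument that `set_password` now takes (`None` here, `Some("post_rotatekey")` in the password-change handler above) pairs with the `reset_stamp_exception` call that appears later in this diff. Below is a minimal toy model of that lifecycle, under the assumption that the exception is simply a stored route name that is cleared after use; the real `User` model persists this state differently and rotates the security stamp properly.

```rust
/// Illustrative stand-ins for vaultwarden's security-stamp bookkeeping; the
/// real User model stores these in the database and uses its own types.
struct User {
    password_hash: String,
    security_stamp: String,
    // Route that may still be called once with the old stamp, e.g. "post_rotatekey".
    stamp_exception_route: Option<String>,
}

impl User {
    /// Sketch of a two-argument set_password: rotating the security stamp
    /// invalidates existing sessions, and the optional route name records a
    /// one-shot exception so the caller can finish a follow-up request.
    fn set_password(&mut self, new_hash: &str, allow_next_route: Option<&str>) {
        self.password_hash = new_hash.to_string();
        self.security_stamp = format!("stamp-{}", self.password_hash.len()); // placeholder rotation
        self.stamp_exception_route = allow_next_route.map(String::from);
    }

    fn reset_stamp_exception(&mut self) {
        self.stamp_exception_route = None;
    }
}

fn main() {
    let mut user = User {
        password_hash: "old".into(),
        security_stamp: "stamp-0".into(),
        stamp_exception_route: None,
    };
    user.set_password("new-master-hash", Some("post_rotatekey"));
    assert_eq!(user.stamp_exception_route.as_deref(), Some("post_rotatekey"));
    user.reset_stamp_exception();
    assert!(user.stamp_exception_route.is_none());
    println!("stamp exception lifecycle ok");
}
```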
@@ -316,15 +320,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
             err!("The cipher is not owned by the user")
         }

-        update_cipher_from_data(
-            &mut saved_cipher,
-            cipher_data,
-            &headers,
-            false,
-            &conn,
-            &nt,
-            UpdateType::CipherUpdate,
-        )?
+        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherUpdate)?
     }

     // Update user data
@@ -333,6 +329,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
     user.akey = data.Key;
     user.private_key = Some(data.PrivateKey);
     user.reset_security_stamp();
+    user.reset_stamp_exception();

     user.save(&conn)
 }
@@ -371,8 +368,8 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
         err!("Email already in use");
     }

-    if !CONFIG.signups_allowed() && !CONFIG.can_signup_user(&data.NewEmail) {
-        err!("Email cannot be changed to this address");
+    if !CONFIG.is_email_domain_allowed(&data.NewEmail) {
+        err!("Email domain not allowed");
     }

     let token = crypto::generate_token(6)?;
@@ -440,7 +437,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.email_new = None;
     user.email_new_token = None;

-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;

     user.save(&conn)
@@ -455,7 +452,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
     }

     if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
-        error!("Error sending delete account email: {:#?}", e);
+        error!("Error sending verify_email email: {:#?}", e);
     }

     Ok(())
@@ -606,7 +603,7 @@ struct PreloginData {
 }

 #[post("/accounts/prelogin", data = "<data>")]
-fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
+fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
     let data: PreloginData = data.into_inner().data;

     let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
@@ -614,8 +611,25 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
         None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
     };

-    Ok(Json(json!({
+    Json(json!({
         "Kdf": kdf_type,
         "KdfIterations": kdf_iter
-    })))
+    }))
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct VerifyPasswordData {
+    MasterPasswordHash: String,
+}
+
+#[post("/accounts/verify-password", data = "<data>")]
+fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
+    let data: VerifyPasswordData = data.into_inner().data;
+    let user = headers.user;
+
+    if !user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password")
+    }
+
+    Ok(())
 }
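The new `verify-password` endpoint above compares the client-supplied master password hash against the stored credentials via `check_valid_password`. As a hedged illustration of the comparison idea only (vaultwarden actually verifies a PBKDF2 derivation in its `crypto` module rather than comparing raw strings), a constant-time equality check looks like this:

```rust
/// Sketch of a timing-safe comparison behind a check_valid_password-style
/// helper: compare the client-supplied hash against the stored one without
/// short-circuiting on the first mismatching byte.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    // Accumulate differences so the loop always runs to completion.
    let mut diff = 0u8;
    for (x, y) in a.iter().zip(b.iter()) {
        diff |= x ^ y;
    }
    diff == 0
}

fn main() {
    let stored = b"3f7a-example-master-password-hash";
    assert!(constant_time_eq(stored, b"3f7a-example-master-password-hash"));
    assert!(!constant_time_eq(stored, b"wrong-hash"));
    println!("verify-password comparison ok");
}
```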
@@ -1,28 +1,33 @@
 use std::collections::{HashMap, HashSet};
 use std::path::Path;

-use rocket::http::ContentType;
-use rocket::{request::Form, Data, Route};
+use chrono::{NaiveDateTime, Utc};
+use rocket::{http::ContentType, request::Form, Data, Route};

 use rocket_contrib::json::Json;
 use serde_json::Value;

-use multipart::server::save::SavedData;
-use multipart::server::{Multipart, SaveResult};
-
 use data_encoding::HEXLOWER;
+use multipart::server::{save::SavedData, Multipart, SaveResult};

-use crate::db::models::*;
-use crate::db::DbConn;
-use crate::crypto;
-use crate::api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType};
-use crate::auth::Headers;
-
-use crate::CONFIG;
+use crate::{
+    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
+    auth::Headers,
+    crypto,
+    db::{models::*, DbConn, DbPool},
+    CONFIG,
+};

 pub fn routes() -> Vec<Route> {
+    // Note that many routes have an `admin` variant; this seems to be
+    // because the stored procedure that upstream Bitwarden uses to determine
+    // whether the user can edit a cipher doesn't take into account whether
+    // the user is an org owner/admin. The `admin` variant first checks
+    // whether the user is an owner/admin of the relevant org, and if so,
+    // allows the operation unconditionally.
+    //
+    // vaultwarden factors in the org owner/admin status as part of
+    // determining the write accessibility of a cipher, so most
+    // admin/non-admin implementations can be shared.
     routes![
         sync,
         get_ciphers,
@@ -44,15 +49,24 @@ pub fn routes() -> Vec<Route> {
         post_cipher_admin,
         post_cipher_share,
         put_cipher_share,
-        put_cipher_share_seleted,
+        put_cipher_share_selected,
         post_cipher,
         put_cipher,
         delete_cipher_post,
         delete_cipher_post_admin,
+        delete_cipher_put,
+        delete_cipher_put_admin,
         delete_cipher,
         delete_cipher_admin,
         delete_cipher_selected,
         delete_cipher_selected_post,
+        delete_cipher_selected_put,
+        delete_cipher_selected_admin,
+        delete_cipher_selected_post_admin,
+        delete_cipher_selected_put_admin,
+        restore_cipher_put,
+        restore_cipher_put_admin,
+        restore_cipher_selected,
         delete_all,
         move_cipher_selected,
         move_cipher_selected_put,
@@ -63,6 +77,15 @@ pub fn routes() -> Vec<Route> {
     ]
 }

+pub fn purge_trashed_ciphers(pool: DbPool) {
+    debug!("Purging trashed ciphers");
+    if let Ok(conn) = pool.get() {
+        Cipher::purge_trash(&conn);
+    } else {
+        error!("Failed to get DB connection while purging trashed ciphers")
+    }
+}
+
 #[derive(FromForm, Default)]
 struct SyncData {
     #[form(field = "excludeDomains")]
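`purge_trashed_ciphers` hands the actual deletion to `Cipher::purge_trash`, which is not shown in this diff. A standalone sketch of the retention predicate such a purge could apply is below; `retention_days` is an illustrative parameter, not vaultwarden's actual config key, and the real implementation runs this as a single DELETE in the database.

```rust
use chrono::{Duration, NaiveDateTime, Utc};

/// Hypothetical retention predicate: a trashed cipher is purged once its
/// deleted_at timestamp is older than the retention window.
fn should_purge(deleted_at: Option<NaiveDateTime>, retention_days: i64) -> bool {
    let cutoff = Utc::now().naive_utc() - Duration::days(retention_days);
    matches!(deleted_at, Some(ts) if ts < cutoff)
}

fn main() {
    let long_ago = Utc::now().naive_utc() - Duration::days(45);
    assert!(should_purge(Some(long_ago), 30)); // past the window: purge
    assert!(!should_purge(None, 30));          // never trashed: keep
    println!("purge predicate ok");
}
```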
@@ -70,51 +93,57 @@ struct SyncData {
 }

 #[get("/sync?<data..>")]
-fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
     let user_json = headers.user.to_json(&conn);

     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
-    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
+    let collections_json: Vec<Value> =
+        collections.iter().map(|c| c.to_json_details(&headers.user.uuid, &conn)).collect();

-    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
-    let ciphers_json: Vec<Value> = ciphers
-        .iter()
-        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
-        .collect();
+    let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
+    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
+
+    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
+    let ciphers_json: Vec<Value> =
+        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
+
+    let sends = Send::find_by_user(&headers.user.uuid, &conn);
+    let sends_json: Vec<Value> = sends.iter().map(|s| s.to_json()).collect();

     let domains_json = if data.exclude_domains {
         Value::Null
     } else {
-        api::core::_get_eq_domains(headers, true).unwrap().into_inner()
+        api::core::_get_eq_domains(headers, true).into_inner()
     };

-    Ok(Json(json!({
+    Json(json!({
         "Profile": user_json,
         "Folders": folders_json,
         "Collections": collections_json,
+        "Policies": policies_json,
         "Ciphers": ciphers_json,
         "Domains": domains_json,
+        "Sends": sends_json,
+        "unofficialServer": true,
         "Object": "sync"
-    })))
+    }))
 }

 #[get("/ciphers")]
-fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
-    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
-
-    let ciphers_json: Vec<Value> = ciphers
-        .iter()
-        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
-        .collect();
+fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
+    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
+
+    let ciphers_json: Vec<Value> =
+        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();

-    Ok(Json(json!({
+    Json(json!({
         "Data": ciphers_json,
         "Object": "list",
         "ContinuationToken": null
-    })))
+    }))
 }

 #[get("/ciphers/<uuid>")]
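The reworked `sync` handler now returns `Json<Value>` directly, reflecting that assembling the payload cannot fail once the queries have run. A minimal standalone sketch of the payload shape is below; the field values are placeholders, and the real handler fills each list from database queries.

```rust
use serde_json::{json, Value};

/// Minimal sketch of assembling the sync payload shape returned above.
fn sync_payload(profile: Value, ciphers: Vec<Value>, sends: Vec<Value>) -> Value {
    json!({
        "Profile": profile,
        "Folders": [],
        "Collections": [],
        "Policies": [],
        "Ciphers": ciphers,
        "Domains": Value::Null,
        "Sends": sends,
        "unofficialServer": true, // lets clients detect a non-official server
        "Object": "sync"
    })
}

fn main() {
    let payload = sync_payload(json!({"Email": "user@example.com"}), vec![], vec![]);
    assert_eq!(payload["Object"], "sync");
    println!("{}", payload);
}
```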
@@ -177,6 +206,14 @@ pub struct CipherData {
     #[serde(rename = "Attachments")]
     _Attachments: Option<Value>, // Unused, contains map of {id: filename}
     Attachments2: Option<HashMap<String, Attachments2Data>>,
+
+    // The revision datetime (in ISO 8601 format) of the client's local copy
+    // of the cipher. This is used to prevent a client from updating a cipher
+    // when it doesn't have the latest version, as that can result in data
+    // loss. It's not an error when no value is provided; this can happen
+    // when using older client versions, or if the operation doesn't involve
+    // updating an existing cipher.
+    LastKnownRevisionDate: Option<String>,
 }

 #[derive(Deserialize, Debug)]
@@ -186,22 +223,46 @@ pub struct Attachments2Data {
     Key: String,
 }

+/// Called when an org admin clones an org cipher.
 #[post("/ciphers/admin", data = "<data>")]
 fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    let data: ShareCipherData = data.into_inner().data;
+    post_ciphers_create(data, headers, conn, nt)
+}
+
+/// Called when creating a new org-owned cipher, or cloning a cipher (whether
+/// user- or org-owned). When cloning a cipher to a user-owned cipher,
+/// `organizationId` is null.
+#[post("/ciphers/create", data = "<data>")]
+fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    let mut data: ShareCipherData = data.into_inner().data;
+
+    // Check that one or more collections are selected when this cipher is part of an organization.
+    // err if this is not the case, before creating an empty cipher.
+    if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
+        err!("You must select at least one collection.");
+    }
+
+    // This check is usually only needed in update_cipher_from_data(), but we
+    // need it here as well to avoid creating an empty cipher in the call to
+    // cipher.save() below.
+    enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;

     let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
     cipher.user_uuid = Some(headers.user.uuid.clone());
     cipher.save(&conn)?;

+    // When cloning a cipher, the Bitwarden clients seem to set this field
+    // based on the cipher being cloned (when creating a new cipher, it's set
+    // to null as expected). However, `cipher.created_at` is initialized to
+    // the current time, so the stale data check will end up failing down the
+    // line. Since this function only creates new ciphers (whether by cloning
+    // or otherwise), we can just ignore this field entirely.
+    data.Cipher.LastKnownRevisionDate = None;
+
     share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
 }

-#[post("/ciphers/create", data = "<data>")]
-fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    post_ciphers_admin(data, headers, conn, nt)
-}
-
+/// Called when creating a new user-owned cipher.
 #[post("/ciphers", data = "<data>")]
 fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     let data: CipherData = data.into_inner().data;
@@ -212,6 +273,24 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }

+/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
+/// A non-owner/admin user belonging to an org with the personal ownership policy
+/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
+/// (that were created before the policy was applicable to the user). The user is
+/// allowed to delete or share such ciphers to an org, however.
+///
+/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
+fn enforce_personal_ownership_policy(data: &CipherData, headers: &Headers, conn: &DbConn) -> EmptyResult {
+    if data.OrganizationId.is_none() {
+        let user_uuid = &headers.user.uuid;
+        let policy_type = OrgPolicyType::PersonalOwnership;
+        if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
+            err!("Due to an Enterprise Policy, you are restricted from saving items to your personal vault.")
+        }
+    }
+    Ok(())
+}
+
 pub fn update_cipher_from_data(
     cipher: &mut Cipher,
     data: CipherData,
@@ -221,6 +300,20 @@ pub fn update_cipher_from_data(
     nt: &Notify,
     ut: UpdateType,
 ) -> EmptyResult {
+    enforce_personal_ownership_policy(&data, headers, conn)?;
+
+    // Check that the client isn't updating an existing cipher with stale data.
+    if let Some(dt) = data.LastKnownRevisionDate {
+        match NaiveDateTime::parse_from_str(&dt, "%+") {
+            // ISO 8601 format
+            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
+                err!("The client copy of this cipher is out of date. Resync the client and try again.")
+            }
+            Ok(_) => (),
+        }
+    }
+
     if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
         err!("Organization mismatch. Please resync the client before updating the cipher")
     }
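The stale-data check above parses `LastKnownRevisionDate` with chrono's `%+` (ISO 8601 / RFC 3339) format and tolerates up to one second of skew before rejecting the update. A standalone demonstration of the same comparison:

```rust
use chrono::NaiveDateTime;

/// Standalone version of the stale-data check: "%+" parses an ISO 8601
/// timestamp, and an update counts as stale when the server copy is more
/// than one second newer than the client's last known revision.
fn is_stale(server_updated_at: NaiveDateTime, last_known: &str) -> bool {
    match NaiveDateTime::parse_from_str(last_known, "%+") {
        // Unparseable input is only logged (not fatal) in the real handler.
        Err(_) => false,
        Ok(dt) => server_updated_at.signed_duration_since(dt).num_seconds() > 1,
    }
}

fn main() {
    let server = NaiveDateTime::parse_from_str("2021-03-10T12:00:05+00:00", "%+").unwrap();
    assert!(is_stale(server, "2021-03-10T12:00:00+00:00"));  // 5s behind: stale
    assert!(!is_stale(server, "2021-03-10T12:00:05+00:00")); // in sync
    println!("revision-date check ok");
}
```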
@@ -234,6 +327,11 @@ pub fn update_cipher_from_data(
         || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
     {
         cipher.organization_uuid = Some(org_id);
+        // After some discussion in PR #1329, the user_uuid = None line was re-added.
+        // TODO: Audit/check the whole save/update cipher chain.
+        // Upstream uses the user_uuid to let a cipher a user added to an org still be viewed/edited by that user,
+        // even when the user has hide-passwords configured as their policy.
+        // Removing the line below would fix that, but we have to check what effect this would have on the rest of the code.
         cipher.user_uuid = None;
     } else {
         err!("You don't have permission to add cipher directly to organization")
@@ -264,7 +362,10 @@ pub fn update_cipher_from_data(
     };

     if saved_att.cipher_uuid != cipher.uuid {
-        err!("Attachment is not owned by the cipher")
+        // Warn and break here: cloning a cipher submits the attachment data, but the attachments themselves are not cloned.
+        // Erroring out here would break the whole clone operation and cause empty ciphers to appear.
+        warn!("Attachment is not owned by the cipher");
+        break;
     }

     saved_att.akey = Some(attachment.Key);
@@ -274,6 +375,20 @@ pub fn update_cipher_from_data(
         }
     }

+    // Clean up cipher data, like removing the 'Response' key.
+    // This key is generated somewhere in the JavaScript client, so there is no way for us to fix it at the source.
+    // Also, upstream only retrieves the keys they actually want to store, and thus skips the 'Response' key.
+    // We do not mind which other data is in there; that keeps our model more flexible when there are upstream changes.
+    // But we at least know we do not need to store and return this specific key.
+    fn _clean_cipher_data(mut json_data: Value) -> Value {
+        if json_data.is_array() {
+            json_data.as_array_mut().unwrap().iter_mut().for_each(|ref mut f| {
+                f.as_object_mut().unwrap().remove("Response");
+            });
+        };
+        json_data
+    }
+
     let type_data_opt = match data.Type {
         1 => data.Login,
         2 => data.SecureNote,
@@ -282,29 +397,28 @@ pub fn update_cipher_from_data(
         _ => err!("Invalid type"),
     };

-    let mut type_data = match type_data_opt {
-        Some(data) => data,
+    let type_data = match type_data_opt {
+        Some(mut data) => {
+            // Remove the 'Response' key from the base object.
+            data.as_object_mut().unwrap().remove("Response");
+            // Remove the 'Response' key from every Uri.
+            if data["Uris"].is_array() {
+                data["Uris"] = _clean_cipher_data(data["Uris"].clone());
+            }
+            data
+        }
         None => err!("Data missing"),
     };

-    // TODO: ******* Backwards compat start **********
-    // To remove backwards compatibility, just delete this code,
-    // and remove the compat code from cipher::to_json
-    type_data["Name"] = Value::String(data.Name.clone());
-    type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
-    type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
-    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
-    // TODO: ******* Backwards compat end **********
-
-    cipher.favorite = data.Favorite.unwrap_or(false);
     cipher.name = data.Name;
     cipher.notes = data.Notes;
-    cipher.fields = data.Fields.map(|f| f.to_string());
+    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
     cipher.data = type_data.to_string();
     cipher.password_history = data.PasswordHistory.map(|f| f.to_string());

     cipher.save(&conn)?;
     cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
+    cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;

     if ut != UpdateType::None {
         nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
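A slightly more defensive standalone version of the `Response`-stripping cleanup above is shown below; it avoids the `unwrap` calls, which the in-tree helper can afford because it controls its inputs.

```rust
use serde_json::{json, Value};

/// Strip the client-generated "Response" key from every object in a JSON
/// array, leaving other keys untouched so the stored model stays flexible
/// about upstream additions.
fn clean_cipher_data(mut json_data: Value) -> Value {
    if let Some(items) = json_data.as_array_mut() {
        for item in items.iter_mut() {
            if let Some(obj) = item.as_object_mut() {
                obj.remove("Response");
            }
        }
    }
    json_data
}

fn main() {
    let uris = json!([{"Uri": "https://example.com", "Response": {}}]);
    let cleaned = clean_cipher_data(uris);
    assert!(cleaned[0].get("Response").is_none());
    assert_eq!(cleaned[0]["Uri"], "https://example.com");
    println!("{}", cleaned);
}
```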
@@ -367,6 +481,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
     Ok(())
 }

+/// Called when an org admin modifies an existing org cipher.
 #[put("/ciphers/<uuid>/admin", data = "<data>")]
 fn put_cipher_admin(
     uuid: String,
@@ -403,6 +518,11 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
         None => err!("Cipher doesn't exist"),
     };

+    // TODO: Check if only the folder ID or favorite status is being changed.
+    // These are per-user properties that technically aren't part of the
+    // cipher itself, so the user shouldn't need write access to change these.
+    // Interestingly, upstream Bitwarden doesn't properly handle this either.
+
     if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
         err!("Cipher is not write accessible")
     }
@@ -467,11 +587,8 @@ fn post_collections_admin(
     }

     let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect();
-    let current_collections: HashSet<String> = cipher
-        .get_collections(&headers.user.uuid, &conn)
-        .iter()
-        .cloned()
-        .collect();
+    let current_collections: HashSet<String> =
+        cipher.get_collections(&headers.user.uuid, &conn).iter().cloned().collect();

     for collection in posted_collections.symmetric_difference(&current_collections) {
         match Collection::find_by_uuid(&collection, &conn) {
@@ -536,7 +653,7 @@ struct ShareSelectedCipherData {
 }

 #[put("/ciphers/share", data = "<data>")]
-fn put_cipher_share_seleted(
+fn put_cipher_share_selected(
     data: JsonUpcase<ShareSelectedCipherData>,
     headers: Headers,
     conn: DbConn,
@@ -599,10 +716,13 @@ fn share_cipher_by_uuid(
         None => err!("Cipher doesn't exist"),
     };

+    let mut shared_to_collection = false;
+
     match data.Cipher.OrganizationId.clone() {
-        None => err!("Organization id not provided"),
+        // If we don't get an organization ID, we don't do anything.
+        // No error, because this path is also used by the Clone functionality.
+        None => {}
         Some(organization_uuid) => {
-            let mut shared_to_collection = false;
             for uuid in &data.CollectionIds {
                 match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                     None => err!("Invalid collection ID provided"),
@@ -616,19 +736,20 @@ fn share_cipher_by_uuid(
                 }
             }
         }
-            update_cipher_from_data(
-                &mut cipher,
-                data.Cipher,
-                &headers,
-                shared_to_collection,
-                &conn,
-                &nt,
-                UpdateType::CipherUpdate,
-            )?;
-
-            Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
-        }
-    }
+    };
+
+    update_cipher_from_data(
+        &mut cipher,
+        data.Cipher,
+        &headers,
+        shared_to_collection,
+        &conn,
+        &nt,
+        UpdateType::CipherUpdate,
+    )?;
+
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
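The restructuring above hoists `shared_to_collection` out of the `Some` match arm so that both arms can fall through to a single `update_cipher_from_data` call instead of duplicating it. A toy example of the pattern, with illustrative names only:

```rust
/// A flag that used to live inside one match arm is declared before the
/// match so code after the match can observe it, letting both arms share
/// the same tail logic.
fn classify(org_id: Option<&str>) -> (bool, &'static str) {
    let mut shared_to_collection = false;

    match org_id {
        // No organization ID: nothing to do, but no error either.
        None => {}
        Some(_) => {
            shared_to_collection = true; // e.g. set while walking collection IDs
        }
    };

    // Shared tail that previously had to be duplicated inside the Some arm.
    if shared_to_collection {
        (true, "updated as org cipher")
    } else {
        (false, "updated as personal cipher")
    }
}

fn main() {
    assert_eq!(classify(None).1, "updated as personal cipher");
    assert!(classify(Some("org-uuid")).0);
    println!("flag hoisting ok");
}
```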
@@ -642,20 +763,49 @@ fn post_attachment(
 ) -> JsonResult {
     let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => cipher,
-        None => err!("Cipher doesn't exist"),
+        None => err_discard!("Cipher doesn't exist", data),
     };

     if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
-        err!("Cipher is not write accessible")
+        err_discard!("Cipher is not write accessible", data)
     }

     let mut params = content_type.params();
     let boundary_pair = params.next().expect("No boundary provided");
     let boundary = boundary_pair.1;

+    let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
+        match CONFIG.user_attachment_limit() {
+            Some(0) => err_discard!("Attachments are disabled", data),
+            Some(limit_kb) => {
+                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
+                if left <= 0 {
+                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
+                }
+                Some(left as u64)
+            }
+            None => None,
+        }
+    } else if let Some(ref org_uuid) = cipher.organization_uuid {
+        match CONFIG.org_attachment_limit() {
+            Some(0) => err_discard!("Attachments are disabled", data),
+            Some(limit_kb) => {
+                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
+                if left <= 0 {
+                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
+                }
+                Some(left as u64)
+            }
+            None => None,
+        }
+    } else {
+        err_discard!("Cipher is neither owned by a user nor an organization", data);
+    };
+
     let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher.uuid);

     let mut attachment_key = None;
+    let mut error = None;

     Multipart::with_body(data.open(), boundary)
         .foreach_entry(|mut field| {
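The size-limit logic above is identical for user- and org-owned ciphers, differing only in which configured limit and which usage total it reads. Factored out as a standalone function (the names are illustrative; the real code inlines this per owner type and discards the request body on error):

```rust
/// A limit of 0 KB disables attachments outright, a positive limit leaves
/// whatever space the already-stored attachments haven't used, and no limit
/// means unbounded.
fn remaining_quota(limit_kb: Option<i64>, used_bytes: i64) -> Result<Option<u64>, &'static str> {
    match limit_kb {
        Some(0) => Err("Attachments are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - used_bytes;
            if left <= 0 {
                Err("Attachment size limit reached! Delete some files to open space")
            } else {
                Ok(Some(left as u64))
            }
        }
        None => Ok(None), // no configured limit
    }
}

fn main() {
    assert_eq!(remaining_quota(Some(100), 50 * 1024), Ok(Some(50 * 1024)));
    assert!(remaining_quota(Some(0), 0).is_err());
    assert_eq!(remaining_quota(None, 123), Ok(None));
    println!("quota checks ok");
}
```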
@@ -674,21 +824,25 @@ fn post_attachment(
                     let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
                     let path = base_path.join(&file_name);

-                    let size = match field.data.save().memory_threshold(0).size_limit(None).with_path(path) {
-                        SaveResult::Full(SavedData::File(_, size)) => size as i32,
-                        SaveResult::Full(other) => {
-                            error!("Attachment is not a file: {:?}", other);
-                            return;
-                        }
-                        SaveResult::Partial(_, reason) => {
-                            error!("Partial result: {:?}", reason);
-                            return;
-                        }
-                        SaveResult::Error(e) => {
-                            error!("Error: {:?}", e);
-                            return;
-                        }
-                    };
+                    let size =
+                        match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
+                            SaveResult::Full(SavedData::File(_, size)) => size as i32,
+                            SaveResult::Full(other) => {
+                                std::fs::remove_file(path).ok();
+                                error = Some(format!("Attachment is not a file: {:?}", other));
+                                return;
+                            }
+                            SaveResult::Partial(_, reason) => {
+                                std::fs::remove_file(path).ok();
+                                error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
+                                return;
+                            }
+                            SaveResult::Error(e) => {
+                                std::fs::remove_file(path).ok();
+                                error = Some(format!("Error: {:?}", e));
+                                return;
+                            }
+                        };

                     let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
                     attachment.akey = attachment_key.clone();
@@ -699,6 +853,10 @@ fn post_attachment(
         })
         .expect("Error processing multipart data");

+    if let Some(ref e) = error {
+        err!(e);
+    }
+
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));

     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
@@ -716,11 +874,7 @@ fn post_attachment_admin(
     post_attachment(uuid, data, content_type, headers, conn, nt)
 }

-#[post(
-    "/ciphers/<uuid>/attachment/<attachment_id>/share",
-    format = "multipart/form-data",
-    data = "<data>"
-)]
+#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
 fn post_attachment_share(
     uuid: String,
     attachment_id: String,
@@ -774,50 +928,89 @@ fn delete_attachment_admin(

 #[post("/ciphers/<uuid>/delete")]
 fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
 }

 #[post("/ciphers/<uuid>/delete-admin")]
 fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
+}
+
+#[put("/ciphers/<uuid>/delete")]
+fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
+}
+
+#[put("/ciphers/<uuid>/delete-admin")]
+fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
 }

 #[delete("/ciphers/<uuid>")]
 fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
 }

 #[delete("/ciphers/<uuid>/admin")]
 fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
 }

 #[delete("/ciphers", data = "<data>")]
 fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    let data: Value = data.into_inner().data;
-
-    let uuids = match data.get("Ids") {
-        Some(ids) => match ids.as_array() {
-            Some(ids) => ids.iter().filter_map(Value::as_str),
-            None => err!("Posted ids field is not an array"),
-        },
-        None => err!("Request missing ids field"),
-    };
-
-    for uuid in uuids {
-        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &nt) {
-            return error;
-        };
-    }
-
-    Ok(())
+    _delete_multiple_ciphers(data, headers, conn, false, nt)
 }

 #[post("/ciphers/delete", data = "<data>")]
 fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_multiple_ciphers(data, headers, conn, false, nt)
+}
+
+#[put("/ciphers/delete", data = "<data>")]
+fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
+}
+
+#[delete("/ciphers/admin", data = "<data>")]
+fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
     delete_cipher_selected(data, headers, conn, nt)
 }

+#[post("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_post_admin(
+    data: JsonUpcase<Value>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> EmptyResult {
+    delete_cipher_selected_post(data, headers, conn, nt)
+}
+
+#[put("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_put_admin(
+    data: JsonUpcase<Value>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> EmptyResult {
+    delete_cipher_selected_put(data, headers, conn, nt)
+}
+
+#[put("/ciphers/<uuid>/restore")]
+fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+}
+
+#[put("/ciphers/<uuid>/restore-admin")]
+fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+}
+
+#[put("/ciphers/restore", data = "<data>")]
+fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_multiple_ciphers(data, &headers, &conn, &nt)
+}
+
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct MoveCipherData {
@@ -901,7 +1094,6 @@ fn delete_all(
         Some(user_org) => {
             if user_org.atype == UserOrgType::Owner {
                 Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
-                Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                 nt.send_user_update(UpdateType::Vault, &user);
                 Ok(())
             } else {
@@ -929,8 +1121,8 @@ fn delete_all(
     }
 }

-fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
-    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
+    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
     };
@@ -939,11 +1131,87 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Not
         err!("Cipher can't be deleted by user")
     }

-    cipher.delete(&conn)?;
-    nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
+    if soft_delete {
+        cipher.deleted_at = Some(Utc::now().naive_utc());
+        cipher.save(&conn)?;
+        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    } else {
+        cipher.delete(&conn)?;
+        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
+    }
+
     Ok(())
 }

+fn _delete_multiple_ciphers(
+    data: JsonUpcase<Value>,
+    headers: Headers,
+    conn: DbConn,
+    soft_delete: bool,
+    nt: Notify,
+) -> EmptyResult {
+    let data: Value = data.into_inner().data;
+
+    let uuids = match data.get("Ids") {
+        Some(ids) => match ids.as_array() {
+            Some(ids) => ids.iter().filter_map(Value::as_str),
+            None => err!("Posted ids field is not an array"),
+        },
+        None => err!("Request missing ids field"),
+    };
+
+    for uuid in uuids {
+        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt) {
+            return error;
+        };
+    }
+
+    Ok(())
+}
+
+fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
+    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+        Some(cipher) => cipher,
+        None => err!("Cipher doesn't exist"),
+    };
+
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+        err!("Cipher can't be restored by user")
+    }
+
+    cipher.deleted_at = None;
+    cipher.save(&conn)?;
+
+    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
+}
+
+fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
+    let data: Value = data.into_inner().data;
+
+    let uuids = match data.get("Ids") {
+        Some(ids) => match ids.as_array() {
+            Some(ids) => ids.iter().filter_map(Value::as_str),
+            None => err!("Posted ids field is not an array"),
+        },
+        None => err!("Request missing ids field"),
+    };
+
+    let mut ciphers: Vec<Value> = Vec::new();
+    for uuid in uuids {
+        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
+            Ok(json) => ciphers.push(json.into_inner()),
+            err => return err,
+        }
+    }
+
+    Ok(Json(json!({
+        "Data": ciphers,
+        "Object": "list",
+        "ContinuationToken": null
+    })))
+}
+
 fn _delete_cipher_attachment_by_id(
     uuid: &str,
     attachment_id: &str,
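A toy model of the soft-delete/restore lifecycle introduced in this diff: soft deletion stamps `deleted_at` and keeps the row so a restore can clear it, while a hard delete removes the record entirely. The struct and functions below are illustrative stand-ins for the database-backed `Cipher` model.

```rust
use chrono::{NaiveDateTime, Utc};

struct Cipher {
    deleted_at: Option<NaiveDateTime>,
}

fn delete(cipher: Cipher, soft_delete: bool) -> Option<Cipher> {
    if soft_delete {
        // Soft delete: mark the time it was moved to the trash, keep the row.
        Some(Cipher { deleted_at: Some(Utc::now().naive_utc()) })
    } else {
        None // hard delete: the row is gone
    }
}

fn restore(mut cipher: Cipher) -> Cipher {
    cipher.deleted_at = None; // back out of the trash
    cipher
}

fn main() {
    let trashed = delete(Cipher { deleted_at: None }, true).expect("soft delete keeps the row");
    assert!(trashed.deleted_at.is_some());
    let restored = restore(trashed);
    assert!(restored.deleted_at.is_none());
    assert!(delete(Cipher { deleted_at: None }, false).is_none());
    println!("soft delete / restore ok");
}
```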
@@ -1,37 +1,27 @@
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::db::models::*;
-use crate::db::DbConn;
-
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType};
-use crate::auth::Headers;
-
-use rocket::Route;
-
-pub fn routes() -> Vec<Route> {
-    routes![
-        get_folders,
-        get_folder,
-        post_folders,
-        post_folder,
-        put_folder,
-        delete_folder_post,
-        delete_folder,
-    ]
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    auth::Headers,
+    db::{models::*, DbConn},
+};
+
+pub fn routes() -> Vec<rocket::Route> {
+    routes![get_folders, get_folder, post_folders, post_folder, put_folder, delete_folder_post, delete_folder,]
 }

 #[get("/folders")]
-fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
+fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
-
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

-    Ok(Json(json!({
+    Json(json!({
         "Data": folders_json,
         "Object": "list",
         "ContinuationToken": null,
-    })))
+    }))
 }

 #[get("/folders/<uuid>")]
@@ -50,7 +40,6 @@ fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {

 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-
 pub struct FolderData {
     pub Name: String,
 }
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user