Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 10:45:57 +03:00)
Compare commits
587 Commits
587 commits between the compared revisions, from 10dadfca06 (newest) back to d75a80bd2d; the per-commit author, message, and date columns were not captured in this mirror snapshot.
.dockerignore
@@ -3,13 +3,18 @@ target
 # Data folder
 data

 # Misc
 .env
+.env.template
+.gitattributes
 .gitignore
+rustfmt.toml

 # IDE files
 .vscode
 .idea
+.editorconfig
+*.iml

 # Documentation
@@ -19,9 +24,17 @@ data
 *.yml
 *.yaml

-# Docker folders
+# Docker
 hooks
 tools
+Dockerfile
+.dockerignore
+docker/**
+!docker/healthcheck.sh
+!docker/start.sh

 # Web vault
 web-vault
+
+# Vaultwarden Resources
+resources
144 .env.template
@@ -1,8 +1,14 @@
+# shellcheck disable=SC2034,SC2148
 ## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
 ##
 ## Be aware that most of these settings will be overridden if they were changed
 ## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
+##
+## By default, Vaultwarden expects this file to be named ".env" and located
+## in the current working directory. If this is not the case, the environment
+## variable ENV_FILE can be set to the location of this file prior to starting
+## Vaultwarden.

 ## Main data folder
 # DATA_FOLDER=data
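A minimal sketch of how the ENV_FILE lookup described above can be honored with the dotenvy crate (which appears in Cargo.toml later in this diff); the `load_env` helper name is illustrative, not Vaultwarden's actual startup code:

```rust
fn load_env() {
    // Fall back to ".env" in the current working directory, as documented above.
    let env_file = std::env::var("ENV_FILE").unwrap_or_else(|_| String::from(".env"));
    // A missing file is not fatal: settings may still come from the process environment.
    if dotenvy::from_path(&env_file).is_err() {
        eprintln!("no config loaded from {env_file}, using process environment only");
    }
}
```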
@@ -24,11 +30,21 @@
 ## Define the size of the connection pool used for connecting to the database.
 # DATABASE_MAX_CONNS=10

+## Database connection initialization
+## Allows SQL statements to be run whenever a new database connection is created.
+## This is mainly useful for connection-scoped pragmas.
+## If empty, a database-specific default is used:
+## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
+## - MySQL: ""
+## - PostgreSQL: ""
+# DATABASE_CONN_INIT=""
+
 ## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
 # ATTACHMENTS_FOLDER=data/attachments
 # SENDS_FOLDER=data/sends
+# TMP_FOLDER=data/tmp

 ## Templates data folder, by default uses embedded templates
 ## Check source code to see the format
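The DATABASE_CONN_INIT behavior above maps naturally onto diesel's r2d2 connection customizer, which runs statements on every freshly created pooled connection. A hedged sketch under that assumption (illustrative names, SQLite shown; not a copy of Vaultwarden's code):

```rust
use diesel::connection::SimpleConnection;
use diesel::r2d2::{ConnectionManager, CustomizeConnection, Pool};
use diesel::sqlite::SqliteConnection;

// Runs the configured init statements on every new pooled connection.
#[derive(Debug)]
struct ConnInit(String);

impl CustomizeConnection<SqliteConnection, diesel::r2d2::Error> for ConnInit {
    fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<(), diesel::r2d2::Error> {
        conn.batch_execute(&self.0).map_err(diesel::r2d2::Error::QueryError)
    }
}

fn build_pool(url: &str) -> Pool<ConnectionManager<SqliteConnection>> {
    // The SQLite default quoted above; a DATABASE_CONN_INIT value would override it.
    let init = "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string();
    Pool::builder()
        .connection_customizer(Box::new(ConnInit(init)))
        .build(ConnectionManager::new(url))
        .expect("failed to build database pool")
}
```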
@@ -61,11 +77,38 @@
 ## To control this on a per-org basis instead, use the "Disable Send" org policy.
 # SENDS_ALLOWED=true

+## Controls whether users can enable emergency access to their accounts.
+## This setting applies globally to all users.
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=
+
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false
+
 ## Job scheduler settings
 ##
 ## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
 ## and are always in terms of UTC time (regardless of your local time zone settings).
 ##
+## The schedule format is a bit different from crontab as crontab does not contain seconds.
+## You can test the format here: https://crontab.guru, but remove the first digit!
+## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK
+## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
+## "0 30 * * * *"
+## "0 30 1 * * *"
+##
 ## How often (in ms) the job scheduler thread checks for jobs that need running.
 ## Set to 0 to globally disable scheduled jobs.
 # JOB_POLL_INTERVAL_MS=30000
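The six-field schedule syntax documented above is what the cron crate (named in the comments) parses; a standalone sketch, not taken from Vaultwarden, that validates an expression and previews its next UTC firing times:

```rust
use std::str::FromStr;
use cron::Schedule;

fn main() {
    // SEC MIN HOUR DAY-OF-MONTH MONTH DAY-OF-WEEK: note the leading seconds
    // field that classic 5-field crontab does not have.
    let schedule = Schedule::from_str("0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri")
        .expect("invalid cron expression");
    for datetime in schedule.upcoming(chrono::Utc).take(3) {
        println!("next run (UTC): {datetime}");
    }
}
```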
@@ -77,6 +120,22 @@
 ## Cron schedule of the job that checks for trashed items to delete permanently.
 ## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
 # TRASH_PURGE_SCHEDULE="0 5 0 * * *"
+##
+## Cron schedule of the job that checks for incomplete 2FA logins.
+## Defaults to once every minute. Set blank to disable this job.
+# INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
+##
+## Cron schedule of the job that sends expiration reminders to emergency access grantors.
+## Defaults to hourly (3 minutes after the hour). Set blank to disable this job.
+# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 3 * * * *"
+##
+## Cron schedule of the job that grants emergency access requests that have met the required wait time.
+## Defaults to hourly (7 minutes after the hour). Set blank to disable this job.
+# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 7 * * * *"
+##
+## Cron schedule of the job that cleans old events from the event table.
+## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
+# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"

 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
@@ -86,12 +145,10 @@
 # LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"

 ## Logging to file
-## It's recommended to also set 'ROCKET_CLI_COLORS=off'
 # LOG_FILE=/path/to/log

 ## Logging to Syslog
 ## This requires extended logging
-## It's recommended to also set 'ROCKET_CLI_COLORS=off'
 # USE_SYSLOG=false

 ## Log level
@@ -104,7 +161,7 @@
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents vaultwarden from automatically enabling it on start.
+## this setting only prevents Vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
@@ -113,10 +170,32 @@
 ## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
 # DB_CONNECTION_RETRIES=15

+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
 ## Disable icon downloading
-## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
-## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
-## otherwise it will delete them and they won't be downloaded again.
+## Set to true to disable icon downloading in the internal icon service.
+## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
+## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
+## will be deleted eventually, but won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false

 ## Icon download timeout
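A small illustration of the `{}` template substitution described for custom ICON_SERVICE values (hypothetical helper, not Vaultwarden's implementation):

```rust
// Expand a custom icon-service template for one domain; the template must
// contain exactly one `{}` placeholder, per the setting's documentation.
fn icon_url(template: &str, domain: &str) -> Option<String> {
    if template.matches("{}").count() != 1 {
        return None;
    }
    Some(template.replacen("{}", domain, 1))
}

fn main() {
    assert_eq!(
        icon_url("https://icon.example.com/domain/{}", "github.com").as_deref(),
        Some("https://icon.example.com/domain/github.com")
    );
}
```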
@@ -147,7 +226,7 @@
 # EMAIL_EXPIRATION_TIME=600

 ## Email token size
-## Number of digits in an email token (min: 6, max: 19).
+## Number of digits in an email 2FA token (min: 6, max: 255).
 ## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
 # EMAIL_TOKEN_SIZE=6
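For illustration only, a token of EMAIL_TOKEN_SIZE digits can be produced with the rand crate (listed in Cargo.toml below); this sketch is not Vaultwarden's actual token generator:

```rust
use rand::Rng;

// Generate a numeric token of `token_size` digits (6..=255 per the setting above).
fn generate_email_token(token_size: u8) -> String {
    let mut rng = rand::thread_rng();
    (0..token_size).map(|_| char::from(b'0' + rng.gen_range(0u8..10))).collect()
}
```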
@@ -194,11 +273,17 @@
 ## Name shown in the invitation emails that don't come from a specific organization
 # INVITATION_ORG_NAME=Vaultwarden

-## Per-organization attachment limit (KB)
-## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+## The number of hours after which an organization invite token, emergency access invite token,
+## email verification token and deletion request token will expire (must be at least 1)
+# INVITATION_EXPIRATION_HOURS=120
+
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
 # ORG_ATTACHMENT_LIMIT=
-## Per-user attachment limit (KB).
-## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
 # USER_ATTACHMENT_LIMIT=

 ## Number of days to wait before auto-deleting a trashed item.
@@ -206,19 +291,31 @@
 ## This setting applies globally, so make sure to inform all users of any changes to this setting.
 # TRASH_AUTO_DELETE_DAYS=

+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3
+
 ## Controls the PBKDF password iterations to apply on the server
 ## The change only applies when the password is changed
 # PASSWORD_ITERATIONS=100000

-## Whether password hint should be sent into the error response when the client request it
-# SHOW_PASSWORD_HINT=true
+## Controls whether users can set password hints. This setting applies globally to all users.
+# PASSWORD_HINTS_ALLOWED=true
+
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false

 ## Domain settings
 ## The domain must match the address from where you access the server
 ## It's recommended to configure this value, otherwise certain functionality might not work,
 ## like attachment downloads, email links and U2F.
 ## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://bw.domain.tld:8443
+# DOMAIN=https://vw.domain.tld:8443

 ## Allowed iframe ancestors (Know the risks!)
 ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
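PASSWORD_ITERATIONS above feeds a PBKDF2 derivation; a simplified sketch using the ring crate from Cargo.toml, with an illustrative 32-byte output size rather than the exact server code:

```rust
use std::num::NonZeroU32;
use ring::pbkdf2;

// Derive a 32-byte hash; raising `iterations` makes brute-forcing slower.
fn hash_password(password: &[u8], salt: &[u8], iterations: u32) -> [u8; 32] {
    let mut out = [0u8; 32];
    pbkdf2::derive(
        pbkdf2::PBKDF2_HMAC_SHA256,
        NonZeroU32::new(iterations).expect("iterations must be at least 1"),
        salt,
        password,
        &mut out,
    );
    out
}
```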
@@ -227,6 +324,17 @@
 ## Multiple values must be separated with a whitespace.
 # ALLOWED_IFRAME_ANCESTORS=

+## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
+# LOGIN_RATELIMIT_SECONDS=60
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
+## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
+# LOGIN_RATELIMIT_MAX_BURST=10
+
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3
+
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
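The average-rate-plus-burst model behind the LOGIN_RATELIMIT_* settings is what the governor crate (added to Cargo.toml in this diff) provides. A hedged sketch of the same quota shape, not Vaultwarden's actual wiring:

```rust
use std::num::NonZeroU32;
use std::time::Duration;
use governor::{Quota, RateLimiter};

fn main() {
    // On average one request per 60 seconds, allowing bursts of up to 10,
    // mirroring LOGIN_RATELIMIT_SECONDS=60 / LOGIN_RATELIMIT_MAX_BURST=10.
    let quota = Quota::with_period(Duration::from_secs(60))
        .expect("period must be non-zero")
        .allow_burst(NonZeroU32::new(10).expect("burst must be non-zero"));
    let limiter = RateLimiter::direct(quota);
    // Within the burst budget, checks pass immediately.
    assert!(limiter.check().is_ok());
}
```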
@@ -271,9 +379,8 @@
 # SMTP_HOST=smtp.domain.tld
 # SMTP_FROM=vaultwarden@domain.tld
 # SMTP_FROM_NAME=Vaultwarden
-# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
-# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
-# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL needs to be set to true for this option to work. Usually port 465 is used here.
+# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
+# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
@@ -288,6 +395,9 @@
 ## but might need to be changed in case it trips some anti-spam filters
 # HELO_NAME=

+## Embed images as email attachments
+# SMTP_EMBED_IMAGES=false
+
 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
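How the three SMTP_SECURITY modes typically map onto lettre 0.10 transport builders (the lettre version this diff moves to); a sketch with error handling trimmed, not the actual Vaultwarden mail code:

```rust
use lettre::{AsyncSmtpTransport, Tokio1Executor};

fn build_transport(security: &str, host: &str, port: u16) -> AsyncSmtpTransport<Tokio1Executor> {
    match security {
        // "force_tls": Implicit TLS from the first byte (typically port 465).
        "force_tls" => AsyncSmtpTransport::<Tokio1Executor>::relay(host)
            .expect("relay setup failed").port(port).build(),
        // "starttls": plaintext connection upgraded via STARTTLS (ports 587/25).
        "starttls" => AsyncSmtpTransport::<Tokio1Executor>::starttls_relay(host)
            .expect("starttls setup failed").port(port).build(),
        // "off": no encryption at all (port 25); risky outside trusted networks.
        _ => AsyncSmtpTransport::<Tokio1Executor>::builder_dangerous(host).port(port).build(),
    }
}
```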
1 .github/FUNDING.yml (vendored)
@@ -1,2 +1,3 @@
 github: dani-garcia
 liberapay: dani-garcia
+custom: ["https://paypal.me/DaniGG"]
273 .github/workflows/build.yml (vendored)
@@ -2,39 +2,26 @@ name: Build

 on:
   push:
-    paths-ignore:
-      - "*.md"
-      - "*.txt"
-      - ".dockerignore"
-      - ".env.template"
-      - ".gitattributes"
-      - ".gitignore"
-      - "azure-pipelines.yml"
-      - "docker/**"
-      - "hooks/**"
-      - "tools/**"
-      - ".github/FUNDING.yml"
-      - ".github/ISSUE_TEMPLATE/**"
-      - ".github/security-contact.gif"
+    paths:
+      - ".github/workflows/build.yml"
+      - "src/**"
+      - "migrations/**"
+      - "Cargo.*"
+      - "build.rs"
+      - "rust-toolchain"
   pull_request:
-    # Ignore when there are only changes done to one of these paths
-    paths-ignore:
-      - "*.md"
-      - "*.txt"
-      - ".dockerignore"
-      - ".env.template"
-      - ".gitattributes"
-      - ".gitignore"
-      - "azure-pipelines.yml"
-      - "docker/**"
-      - "hooks/**"
-      - "tools/**"
-      - ".github/FUNDING.yml"
-      - ".github/ISSUE_TEMPLATE/**"
-      - ".github/security-contact.gif"
+    paths:
+      - ".github/workflows/build.yml"
+      - "src/**"
+      - "migrations/**"
+      - "Cargo.*"
+      - "build.rs"
+      - "rust-toolchain"

 jobs:
   build:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
@@ -43,139 +30,163 @@ jobs:
       fail-fast: false
       matrix:
         channel:
-          - nightly
-          # - stable
-        target-triple:
-          - x86_64-unknown-linux-gnu
-          # - x86_64-unknown-linux-musl
+          - "rust-toolchain" # The version defined in rust-toolchain
+          - "msrv" # The supported MSRV
         include:
-          - target-triple: x86_64-unknown-linux-gnu
-            host-triple: x86_64-unknown-linux-gnu
-            features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features
-            channel: nightly
-            os: ubuntu-18.04
-            ext: ""
-          # - target-triple: x86_64-unknown-linux-gnu
-          #   host-triple: x86_64-unknown-linux-gnu
-          #   features: "sqlite,mysql,postgresql"
-          #   channel: stable
-          #   os: ubuntu-18.04
-          #   ext: ""
+          - channel: "msrv"
+            version: "1.60.0"

-    name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
-    runs-on: ${{ matrix.os }}
+    name: Build and Test ${{ matrix.channel }}
+
     steps:
       # Checkout the repo
-      - name: Checkout
-        uses: actions/checkout@v2
+      - name: "Checkout"
+        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
       # End Checkout the repo

-      # Install musl-tools when needed
-      - name: Install musl tools
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
-        if: matrix.target-triple == 'x86_64-unknown-linux-musl'
-      # End Install musl-tools when needed
-
       # Install dependencies
-      - name: Install dependencies Ubuntu
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
-        if: startsWith( matrix.os, 'ubuntu' )
+      - name: "Install dependencies Ubuntu"
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies

-      # Enable Rust Caching
-      - uses: Swatinem/rust-cache@v1
-      # End Enable Rust Caching
-
+      # Determine rust-toolchain version
+      - name: Init Variables
+        id: toolchain
+        shell: bash
+        if: ${{ matrix.channel == 'rust-toolchain' }}
+        run: |
+          RUST_TOOLCHAIN="$(cat rust-toolchain)"
+          echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
+      # End Determine rust-toolchain version
+
       # Uses the rust-toolchain file to determine version
-      - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
-        uses: actions-rs/toolchain@v1
+      - name: "Install rust-toolchain version"
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
+        if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
-          profile: minimal
-          target: ${{ matrix.target-triple }}
+          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
+          components: clippy, rustfmt
       # End Uses the rust-toolchain file to determine version

+      # Install the MSRV channel to be used
+      - name: "Install MSRV version"
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
+        if: ${{ matrix.channel != 'rust-toolchain' }}
+        with:
+          toolchain: ${{ matrix.version }}
+      # End Install the MSRV channel to be used
+
+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0
+      # End Enable Rust Caching
+
+      # Show environment
+      - name: "Show environment"
+        run: |
+          rustc -vV
+          cargo -vV
+      # End Show environment
+
       # Run cargo tests (In release mode to speed up future builds)
       # First test all features together, afterwards test them separately.
-      - name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
-      # Test single features
-      # 0: sqlite
-      - name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
-        if: ${{ matrix.features[0] != '' }}
-      # 1: mysql
-      - name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
-        if: ${{ matrix.features[1] != '' }}
-      # 2: postgresql
-      - name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
-        if: ${{ matrix.features[2] != '' }}
+      - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
+        id: test_sqlite_mysql_postgresql_mimalloc
+        if: $${{ always() }}
+        run: |
+          cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
+
+      - name: "test features: sqlite,mysql,postgresql"
+        id: test_sqlite_mysql_postgresql
+        if: $${{ always() }}
+        run: |
+          cargo test --release --features sqlite,mysql,postgresql
+
+      - name: "test features: sqlite"
+        id: test_sqlite
+        if: $${{ always() }}
+        run: |
+          cargo test --release --features sqlite
+
+      - name: "test features: mysql"
+        id: test_mysql
+        if: $${{ always() }}
+        run: |
+          cargo test --release --features mysql
+
+      - name: "test features: postgresql"
+        id: test_postgresql
+        if: $${{ always() }}
+        run: |
+          cargo test --release --features postgresql
       # End Run cargo tests

       # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
-      - name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
-        uses: actions-rs/cargo@v1
-        with:
-          command: clippy
-          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
+      - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
+        id: clippy
+        if: ${{ always() && matrix.channel == 'rust-toolchain' }}
+        run: |
+          cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
       # End Run cargo clippy

-      # Run cargo fmt
-      - name: '`cargo fmt`'
-        uses: actions-rs/cargo@v1
-        with:
-          command: fmt
-          args: --all -- --check
+      # Run cargo fmt (Only run on rust-toolchain defined version)
+      - name: "check formatting"
+        id: formatting
+        if: ${{ always() && matrix.channel == 'rust-toolchain' }}
+        run: |
+          cargo fmt --all -- --check
       # End Run cargo fmt

-      # Build the binary
-      - name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
+      # Check for any previous failures; if there are, stop, else continue.
+      # This is useful so all test/clippy/fmt actions are done, and they can all be addressed
+      - name: "Some checks failed"
+        if: ${{ failure() }}
+        run: |
+          echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY
+          echo "|---|------|" >> $GITHUB_STEP_SUMMARY
+          echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          exit 1
+
+      # Check for any previous failures; if there are, stop, else continue.
+      # This is useful so all test/clippy/fmt actions are done, and they can all be addressed
+      - name: "All checks passed"
+        if: ${{ success() }}
+        run: |
+          echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+
+      # Build the binary to upload to the artifacts
+      - name: "build features: sqlite,mysql,postgresql"
+        if: ${{ matrix.channel == 'rust-toolchain' }}
+        run: |
+          cargo build --release --features sqlite,mysql,postgresql
       # End Build the binary

       # Upload artifact to Github Actions
-      - name: Upload artifact
-        uses: actions/upload-artifact@v2
+      - name: "Upload artifact"
+        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1
+        if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
-          name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
-          path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
+          name: vaultwarden
+          path: target/release/vaultwarden
       # End Upload artifact to Github Actions

       ## This is not used at the moment
       ## We could start using this when we can build static binaries
       # Upload to github actions release
       # - name: Release
       #   uses: Shopify/upload-to-release@1
       #   if: startsWith(github.ref, 'refs/tags/')
       #   with:
       #     name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
       #     path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
       #     repo-token: ${{ secrets.GITHUB_TOKEN }}
       # End Upload to github actions release
20 .github/workflows/hadolint.yml (vendored)
@@ -1,34 +1,30 @@
 name: Hadolint

-on:
-  push:
-    # Ignore when there are only changes done to one of these paths
-    paths:
-      - "docker/**"
-  pull_request:
-    # Ignore when there are only changes done to one of these paths
-    paths:
-      - "docker/**"
+on: [
+  push,
+  pull_request
+]

 jobs:
   hadolint:
     name: Validate Dockerfile syntax
     runs-on: ubuntu-20.04
+    timeout-minutes: 30
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
       # End Checkout the repo

-      # Download hadolint
+      # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
         run: |
           sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
           sudo chmod +x /usr/local/bin/hadolint
         env:
-          HADOLINT_VERSION: 2.5.0
+          HADOLINT_VERSION: 2.12.0
       # End Download hadolint

       # Test Dockerfiles
118 .github/workflows/release.yml (vendored, new file)
@@ -0,0 +1,118 @@
name: Release

on:
  push:
    paths:
      - ".github/workflows/release.yml"
      - "src/**"
      - "migrations/**"
      - "hooks/**"
      - "docker/**"
      - "Cargo.*"
      - "build.rs"
      - "diesel.toml"
      - "rust-toolchain"

    branches: # Only on paths above
      - main

    tags: # Always, regardless of paths above
      - '*'

jobs:
  # https://github.com/marketplace/actions/skip-duplicate-actions
  # Some checks to determine if we need to continue with building a new docker.
  # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
  skip_check:
    runs-on: ubuntu-20.04
    if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
    outputs:
      should_skip: ${{ steps.skip_check.outputs.should_skip }}
    steps:
      - name: Skip Duplicates Actions
        id: skip_check
        uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
        with:
          cancel_others: 'true'
        # Only run this when not creating a tag
        if: ${{ startsWith(github.ref, 'refs/heads/') }}

  docker-build:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs: skip_check
    # Start a local docker registry to be used to generate multi-arch images.
    services:
      registry:
        image: registry:2
        ports:
          - 5000:5000
    env:
      DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speed up building!
      # DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
      DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
      SOURCE_COMMIT: ${{ github.sha }}
      SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
    strategy:
      matrix:
        base_image: ["debian","alpine"]

    steps:
      # Checkout the repo
      - name: Checkout
        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
        with:
          fetch-depth: 0

      # Login to Docker Hub
      - name: Login to Docker Hub
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Determine Docker Tag
      - name: Init Variables
        id: vars
        shell: bash
        run: |
          # Check which main tag we are going to build determined by github.ref
          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
          fi
      # End Determine Docker Tag

      - name: Build Debian based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
        run: |
          ./hooks/build
        if: ${{ matrix.base_image == 'debian' }}

      - name: Push Debian based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
        run: |
          ./hooks/push
        if: ${{ matrix.base_image == 'debian' }}

      - name: Build Alpine based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
        run: |
          ./hooks/build
        if: ${{ matrix.base_image == 'alpine' }}

      - name: Push Alpine based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
        run: |
          ./hooks/push
        if: ${{ matrix.base_image == 'alpine' }}
40 .pre-commit-config.yaml (new file)
@@ -0,0 +1,40 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: check-yaml
      - id: check-json
      - id: check-toml
      - id: end-of-file-fixer
        exclude: "(.*js$|.*css$)"
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: detect-private-key
  - repo: local
    hooks:
      - id: fmt
        name: fmt
        description: Format files with cargo fmt.
        entry: cargo fmt
        language: system
        types: [rust]
        args: ["--", "--check"]
      - id: cargo-test
        name: cargo test
        description: Test the package for errors.
        entry: cargo test
        language: system
        args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
        types_or: [rust, file]
        files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
        pass_filenames: false
      - id: cargo-clippy
        name: cargo clippy
        description: Lint Rust sources
        entry: cargo clippy
        language: system
        args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
        types_or: [rust, file]
        files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
        pass_filenames: false
3342 Cargo.lock (generated): file diff suppressed because it is too large.
182 Cargo.toml
@@ -2,7 +2,9 @@
 name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
-edition = "2018"
+edition = "2021"
+rust-version = "1.60.0"
+resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
@@ -11,6 +13,7 @@ publish = false
 build = "build.rs"

 [features]
+# default = ["sqlite"]
 # Empty to keep compatibility, prefer to set USE_SYSLOG=true
 enable_syslog = []
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
@@ -18,140 +21,145 @@ postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
 sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
 # Enable to use a vendored and statically linked openssl
 vendored_openssl = ["openssl/vendored"]
+# Enable MiMalloc memory allocator to replace the default malloc
+# This can improve performance for Alpine builds
+enable_mimalloc = ["mimalloc"]
+# This is a development dependency, and should only be used during development!
+# It enables the usage of the diesel_logger crate, which is able to output the generated queries.
+# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
+# if you want to turn off the logging for a specific run.
+query_logger = ["diesel_logger"]

 # Enable unstable features, requires nightly
 # Currently only used to enable rusts official ip support
 unstable = []

 [target."cfg(not(windows))".dependencies]
-syslog = "4.0.1"
+# Logging
+syslog = "6.0.1" # Needs to be v4 until fern is updated

 [dependencies]
-# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
-rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false }
-rocket_contrib = "=0.5.0-dev"
+# Logging
+log = "0.4.17"
+fern = { version = "0.6.1", features = ["syslog-6"] }
+tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

-# HTTP client
-reqwest = { version = "0.11.4", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies"] }
+backtrace = "0.3.67" # Logging panics to logfile instead stderr only

-# Used for custom short lived cookie jar
-cookie = "0.15.0"
-cookie_store = "0.15.0"
-bytes = "1.0.1"
-url = "2.2.2"
+# A `dotenv` implementation for Rust
+dotenvy = { version = "0.15.6", default-features = false }

-# multipart/form-data support
-multipart = { version = "0.18.0", features = ["server"], default-features = false }
+# Lazy initialization
+once_cell = "1.16.0"

-# WebSockets library
-ws = { version = "0.10.0", package = "parity-ws" }
+# Numerical libraries
+num-traits = "0.2.15"
+num-derive = "0.3.3"

-# MessagePack library
-rmpv = "0.4.7"
+# Web framework
+rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }

-# Concurrent hashmap implementation
-chashmap = "2.2.2"
+# WebSockets libraries
+tokio-tungstenite = "0.18.0"
+rmpv = "1.0.0" # MessagePack library
+dashmap = "5.4.0"

+# Async futures
+futures = "0.3.25"
+tokio = { version = "1.23.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.126", features = ["derive"] }
-serde_json = "1.0.64"
-
-# Logging
-log = "0.4.14"
-fern = { version = "0.6.0", features = ["syslog-4"] }
+serde = { version = "1.0.150", features = ["derive"] }
+serde_json = "1.0.89"

 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.7", features = [ "chrono", "r2d2"] }
-diesel_migrations = "1.4.0"
+diesel = { version = "2.0.2", features = ["chrono", "r2d2"] }
+diesel_migrations = "2.0.0"
+diesel_logger = { version = "0.2.0", optional = true }

 # Bundled SQLite
-libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.25.2", features = ["bundled"], optional = true }

 # Crypto-related libraries
-rand = "0.8.4"
+rand = { version = "0.8.5", features = ["small_rng"] }
 ring = "0.16.20"

 # UUID generation
-uuid = { version = "0.8.2", features = ["v4"] }
+uuid = { version = "1.2.2", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.19", features = ["serde"] }
-chrono-tz = "0.5.3"
-time = "0.2.27"
+chrono = { version = "0.4.23", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.8.1"
+time = "0.3.17"

 # Job scheduler
-job_scheduler = "1.2.1"
+job_scheduler_ng = "2.0.3"

-# TOTP library
-oath = "0.10.2"
-
-# Data encoding library
-data-encoding = "2.3.2"
+# Data encoding library Hex/Base32/Base64
+data-encoding = "2.3.3"

 # JWT library
-jsonwebtoken = "7.2.0"
+jsonwebtoken = "8.2.0"

-# U2F library
-u2f = "0.2.0"
-webauthn-rs = "0.3.0-alpha.7"
+# TOTP library
+totp-lite = "2.0.0"

 # Yubico Library
-yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }

-# A `dotenv` implementation for Rust
-dotenv = { version = "0.15.0", default-features = false }
+# WebAuthn libraries
+webauthn-rs = "0.3.2"

-# Lazy initialization
-once_cell = "1.8.0"
+# Handling of URL's for WebAuthn
+url = "2.3.1"

-# Numerical libraries
-num-traits = "0.2.14"
-num-derive = "0.3.3"
-
 # Email libraries
-tracing = { version = "0.1.26", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
-lettre = { version = "0.10.0-rc.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
+lettre = { version = "0.10.1", features = ["smtp-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+percent-encoding = "2.2.0" # URL encoding library used for URL's in the emails
+email_address = "0.2.4"

 # Template library
-handlebars = { version = "4.0.1", features = ["dir_source"] }
+handlebars = { version = "4.3.5", features = ["dir_source"] }

+# HTTP client
+reqwest = { version = "0.11.13", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }

 # For favicon extraction from main website
-html5ever = "0.25.1"
-markup5ever_rcdom = "0.1.0"
-regex = { version = "1.5.4", features = ["std", "perf"], default-features = false }
-data-url = "0.1.0"
+html5gum = "0.5.2"
+regex = { version = "1.7.0", features = ["std", "perf", "unicode-perl"], default-features = false }
+data-url = "0.2.0"
+bytes = "1.3.0"
+cached = "0.40.0"

+# Used for custom short lived cookie jar during favicon extraction
+cookie = "0.16.1"
+cookie_store = "0.19.0"

 # Used by U2F, JWT and Postgres
-openssl = "0.10.34"
-
-# URL encoding library
-percent-encoding = "2.1.0"
-# Punycode conversion
-idna = "0.2.3"
+openssl = "0.10.44"

 # CLI argument parsing
-pico-args = "0.4.2"
-
-# Logging panics to logfile instead stderr only
-backtrace = "0.3.60"
+pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.5"
+paste = "1.0.10"
+governor = "0.5.1"

+# Check client versions for specific features.
+semver = "1.0.14"

+# Allow overriding the default memory allocator
+# Mainly used for the musl builds, since the default musl malloc is very slow
+mimalloc = { version = "0.1.32", features = ["secure"], default-features = false, optional = true }

 [patch.crates-io]
-# Use newest ring
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
-rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
+# Using a patched version of multer-rs (Used by Rocket) to fix attachment/send file uploads
+# Issue: https://github.com/dani-garcia/vaultwarden/issues/2644
+# Patch: https://github.com/BlackDex/multer-rs/commit/477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a
+multer = { git = "https://github.com/BlackDex/multer-rs", rev = "477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a" }

-# For favicon extraction from main website
-data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = 'eb7330b5296c0d43816d1346211b74182bb4ae37' }
-
-# The maintainer of the `job_scheduler` crate doesn't seem to have responded
-# to any issues or PRs for almost a year (as of April 2021). This hopefully
-# temporary fork updates Cargo.toml to use more up-to-date dependencies.
-# In particular, `cron` has since implemented parsing of some common syntax
-# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
-job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
-
-# Add support for U2F appid extension compatibility
-webauthn-rs = { git = 'https://github.com/kanidm/webauthn-rs', rev = '02a99f534127b30c6f4df7f2d42bc24f76dc4211' }
+# Strip debuginfo from the release builds
+# Also enable thin LTO for some optimizations
+[profile.release]
+strip = "debuginfo"
+lto = "thin"
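The enable_mimalloc feature above is the usual global-allocator swap; a representative sketch of what such a feature typically gates (Vaultwarden's real code may differ in detail):

```rust
// With the `enable_mimalloc` cargo feature on, route all allocations
// through MiMalloc instead of the system allocator (a large win on musl).
#[cfg(feature = "enable_mimalloc")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
```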
22 README.md
@@ -1,4 +1,4 @@
-### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
+### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.

 📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.

@@ -7,12 +7,12 @@
 [](https://hub.docker.com/r/vaultwarden/server)
 [](https://deps.rs/repo/github/dani-garcia/vaultwarden)
 [](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
+[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
 [](https://matrix.to/#/#vaultwarden:matrix.org)

 Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).

-**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
+**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**

 #### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

@@ -58,32 +58,34 @@ If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org]
 ### Sponsors
 Thanks for your contribution to the project!

+<!--
 <table>
 <tr>
 <td align="center">
-<a href="https://github.com/netdadaltd">
-<img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
+<a href="https://github.com/username">
+<img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/>
 <br />
-<sub><b>netDada Ltd.</b></sub>
+<sub><b>username</b></sub>
 </a>
 </td>
 </tr>
 </table>

 <br/>
+-->

 <table>
 <tr>
 <td align="center">
-<a href="https://github.com/Gyarbij" style="width: 75px">
-<sub><b>Chono N</b></sub>
+<a href="https://github.com/themightychris" style="width: 75px">
+<sub><b>Chris Alfano</b></sub>
 </a>
 </td>
 </tr>
 <tr>
 <td align="center">
-<a href="https://github.com/themightychris">
-<sub><b>Chris Alfano</b></sub>
+<a href="https://github.com/numberly" style="width: 75px">
+<sub><b>Numberly</b></sub>
 </a>
 </td>
 </tr>
@@ -1,2 +0,0 @@
|
||||
[global.limits]
|
||||
json = 10485760 # 10 MiB
|
56 build.rs
@@ -9,17 +9,25 @@ fn main() {
    println!("cargo:rustc-cfg=mysql");
    #[cfg(feature = "postgresql")]
    println!("cargo:rustc-cfg=postgresql");
    #[cfg(feature = "query_logger")]
    println!("cargo:rustc-cfg=query_logger");

    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
    compile_error!(
        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
    );

    if let Ok(version) = env::var("BWRS_VERSION") {
        println!("cargo:rustc-env=BWRS_VERSION={}", version);
        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
    } else {
        read_git_info().ok();
    #[cfg(all(not(debug_assertions), feature = "query_logger"))]
    compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");

    // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
    // If neither exist, read from git.
    let maybe_vaultwarden_version =
        env::var("VW_VERSION").or_else(|_| env::var("BWRS_VERSION")).or_else(|_| version_from_git_info());

    if let Ok(version) = maybe_vaultwarden_version {
        println!("cargo:rustc-env=VW_VERSION={version}");
        println!("cargo:rustc-env=CARGO_PKG_VERSION={version}");
    }
}

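The `cargo:rustc-env` lines above turn the resolved version into compile-time env vars. A hedged sketch of how the rest of the crate can then consume them (the function is illustrative, not vaultwarden's actual code):

// Sketch: read build-script-emitted env vars at compile time.
fn displayed_version() -> &'static str {
    // VW_VERSION is only set when build.rs could determine one,
    // so fall back to the package version baked in by cargo.
    option_env!("VW_VERSION").unwrap_or(env!("CARGO_PKG_VERSION"))
}

fn main() {
    println!("vaultwarden {}", displayed_version());
}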
@@ -33,46 +41,40 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
}

/// This method reads info from Git, namely tags, branch, and revision
fn read_git_info() -> Result<(), std::io::Error> {
/// To access these values, use:
/// - env!("GIT_EXACT_TAG")
/// - env!("GIT_LAST_TAG")
/// - env!("GIT_BRANCH")
/// - env!("GIT_REV")
/// - env!("VW_VERSION")
fn version_from_git_info() -> Result<String, std::io::Error> {
    // The exact tag for the current commit, can be empty when
    // the current commit doesn't have an associated tag
    let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
    if let Some(ref exact) = exact_tag {
        println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
        println!("cargo:rustc-env=GIT_EXACT_TAG={exact}");
    }

    // The last available tag, equal to exact_tag when
    // the current commit is tagged
    let last_tag = run(&["git", "describe", "--abbrev=0", "--tags"])?;
    println!("cargo:rustc-env=GIT_LAST_TAG={}", last_tag);
    println!("cargo:rustc-env=GIT_LAST_TAG={last_tag}");

    // The current branch name
    let branch = run(&["git", "rev-parse", "--abbrev-ref", "HEAD"])?;
    println!("cargo:rustc-env=GIT_BRANCH={}", branch);
    println!("cargo:rustc-env=GIT_BRANCH={branch}");

    // The current git commit hash
    let rev = run(&["git", "rev-parse", "HEAD"])?;
    let rev_short = rev.get(..8).unwrap_or_default();
    println!("cargo:rustc-env=GIT_REV={}", rev_short);
    println!("cargo:rustc-env=GIT_REV={rev_short}");

    // Combined version
    let version = if let Some(exact) = exact_tag {
        exact
    if let Some(exact) = exact_tag {
        Ok(exact)
    } else if &branch != "main" && &branch != "master" {
        format!("{}-{} ({})", last_tag, rev_short, branch)
        Ok(format!("{last_tag}-{rev_short} ({branch})"))
    } else {
        format!("{}-{}", last_tag, rev_short)
    };

    println!("cargo:rustc-env=BWRS_VERSION={}", version);
    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);

    // To access these values, use:
    // env!("GIT_EXACT_TAG")
    // env!("GIT_LAST_TAG")
    // env!("GIT_BRANCH")
    // env!("GIT_REV")
    // env!("BWRS_VERSION")

    Ok(())
        Ok(format!("{last_tag}-{rev_short}"))
    }
}
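To make the branching above concrete, these are the expected return values under a few assumed git states (tag and hash values are illustrative, not from the diff):

// Assuming the last tag is "1.27.0" and the short commit hash is "abcdef12":
// - exact tag checked out           -> Ok("1.27.0")
// - on branch "fix-icons"           -> Ok("1.27.0-abcdef12 (fix-icons)")
// - on "main" between releases      -> Ok("1.27.0-abcdef12")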
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
@@ -1,31 +1,41 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

{% set build_stage_base_image = "rust:1.53" %}
{% set build_stage_base_image = "rust:1.66-bullseye" %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
{% set build_stage_base_image = "clux/muslrust:nightly-2021-06-24" %}
{% set runtime_stage_base_image = "alpine:3.14" %}
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.66.0" %}
{% set runtime_stage_base_image = "alpine:3.17" %}
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
{% elif "armv7" in target_file %}
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.14" %}
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.66.0" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.17" %}
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% elif "armv6" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.66.0" %}
{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.17" %}
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
{% elif "arm64" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.66.0" %}
{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.17" %}
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
{% endif %}
{% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "debian:buster-slim" %}
{% set runtime_stage_base_image = "debian:bullseye-slim" %}
{% elif "arm64" in target_file %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
{% set package_arch_name = "arm64" %}
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
{% set package_cross_compiler = "aarch64-linux-gnu" %}
{% elif "armv6" in target_file %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
{% set package_arch_name = "armel" %}
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
{% set package_cross_compiler = "arm-linux-gnueabi" %}
{% elif "armv7" in target_file %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
{% set package_arch_name = "armhf" %}
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
@@ -40,12 +50,17 @@
{% else %}
{% set package_arch_target_param = "" %}
{% endif %}
{% if "buildx" in target_file %}
{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
{% else %}
{% set mount_rust_cache = "" %}
{% endif %}
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_version = "2.20.4b" %}
{% set vault_image_digest = "sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05" %}
{% set vault_version = "v2022.12.0" %}
{% set vault_image_digest = "sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e" %}
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
@@ -55,78 +70,75 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v{{ vault_version }}
#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
#     $ docker pull vaultwarden/web-vault:{{ vault_version }}
#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:{{ vault_version }}
#     [vaultwarden/web-vault@{{ vault_image_digest }}]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
#     [vaultwarden/web-vault:v{{ vault_version }}]
#     [vaultwarden/web-vault:{{ vault_version }}]
#
FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault

########################## BUILD IMAGE ##########################
FROM {{ build_stage_base_image }} as build

{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql
{% set features = "sqlite,postgresql" %}
{% else %}
# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite
{% set features = "sqlite" %}
{% endif %}
{% else %}
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
{% set features = "sqlite,mysql,postgresql" %}
{% endif %}

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Don't download rust docs
RUN rustup set profile minimal

# Create CARGO_HOME folder and don't download rust docs
RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

{% if "alpine" in target_file %}
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
{% if "armv7" in target_file %}
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
{% if "armv6" in target_file %}
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
{% endif %}
{% elif "arm" in target_file %}
#
# Install required build libs for {{ package_arch_name }} architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture {{ package_arch_name }} \
# hadolint ignore=DL3059
RUN dpkg --add-architecture {{ package_arch_name }} \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev{{ package_arch_prefix }} \
        libc6-dev{{ package_arch_prefix }} \
        libpq5{{ package_arch_prefix }} \
        libpq-dev \
        libpq-dev{{ package_arch_prefix }} \
        libmariadb3{{ package_arch_prefix }} \
        libmariadb-dev{{ package_arch_prefix }} \
        libmariadb-dev-compat{{ package_arch_prefix }} \
        gcc-{{ package_cross_compiler }} \
    && mkdir -p ~/.cargo \
    && echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
    #
    # Make sure cargo has the right target config
    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% endif -%}
# Set arm specific environment values
ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"

{% if "amd64" in target_file and "alpine" not in target_file %}
{% elif "amd64" in target_file %}
# Install DB packages
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev{{ package_arch_prefix }} \
    libpq-dev{{ package_arch_prefix }} \
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev{{ package_arch_prefix }} \
        libpq-dev{{ package_arch_prefix }} \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
{% endif %}

@@ -139,37 +151,22 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

{% if "alpine" not in target_file %}
{% if "arm" in target_file %}
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so

ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% endif -%}
{% endif %}
{% if package_arch_target is defined %}
RUN rustup target add {{ package_arch_target }}
RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
{% endif %}

# Configure the DB ARG as late as possible to not invalidate the cached layers above
{% if "alpine" in target_file %}
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% else %}
ARG DB=sqlite,mysql,postgresql
{% endif %}

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release{{ package_arch_target_param }} \
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
    && find . -not -path "./target*" -delete

# Copies the complete project
@@ -181,26 +178,22 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
{% if "armv7" in target_file %}
# hadolint ignore=DL3059
RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
{% endif %}
{% endif %}
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM {{ runtime_stage_base_image }}

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
{% if "alpine" in runtime_stage_base_image %}
ENV SSL_CERT_DIR=/etc/ssl/certs
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80
{%- if "alpine" in runtime_stage_base_image %} \
    SSL_CERT_DIR=/etc/ssl/certs
{% endif %}

{% if "amd64" not in target_file %}
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -211,14 +204,8 @@ RUN mkdir /data \
{% if "alpine" in runtime_stage_base_image %}
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
{% if "mysql" in features %}
        mariadb-connector-c \
{% endif %}
{% if "postgresql" in features %}
        postgresql-libs \
{% endif %}
        ca-certificates
{% else %}
    && apt-get update && apt-get install -y \
@@ -226,12 +213,20 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
{% endif %}

{% if "armv6" in target_file and "alpine" not in target_file %}
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

{% endif -%}

{% if "amd64" not in target_file %}
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
@@ -244,7 +239,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
{% if package_arch_target is defined %}
COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
@@ -257,6 +251,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -7,3 +7,9 @@ all: $(OBJECTS)

%/Dockerfile.alpine: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.buildx: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.buildx.alpine: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

@@ -14,33 +16,41 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.20.4b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.20.4b
#     [vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05
#     [vaultwarden/web-vault:v2.20.4b]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.53 as build
FROM rust:1.66-bullseye as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Don't download rust docs
RUN rustup set profile minimal

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install DB packages
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    libpq-dev \
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
@@ -53,6 +63,9 @@ COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -68,16 +81,17 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
FROM debian:bullseye-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# Create data folder and Install needed libraries
@@ -87,9 +101,9 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

@@ -100,7 +114,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

@@ -109,6 +122,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

@@ -14,30 +16,34 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.20.4b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.20.4b
#     [vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05
#     [vaultwarden/web-vault:v2.20.4b]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM clux/muslrust:nightly-2021-06-24 as build
FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build

# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -50,6 +56,10 @@ COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -65,26 +75,27 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.14
FROM alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        postgresql-libs \
        ca-certificates

@@ -95,7 +106,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

@@ -104,6 +114,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
125 docker/amd64/Dockerfile.buildx (new file)
@@ -0,0 +1,125 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install DB packages
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
117 docker/amd64/Dockerfile.buildx.alpine (new file)
@@ -0,0 +1,117 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

@@ -14,50 +16,61 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.20.4b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.20.4b
#     [vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05
#     [vaultwarden/web-vault:v2.20.4b]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.53 as build
FROM rust:1.66-bullseye as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Don't download rust docs
RUN rustup set profile minimal

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
# hadolint ignore=DL3059
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libpq5:arm64 \
        libpq-dev \
        libpq-dev:arm64 \
        libmariadb3:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -68,27 +81,11 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -104,16 +101,17 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
FROM balenalib/aarch64-debian:bullseye

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -125,9 +123,9 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
@@ -140,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

@@ -149,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
121 docker/arm64/Dockerfile.alpine (new file)
@@ -0,0 +1,121 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
149 docker/arm64/Dockerfile.buildx (new file)
@@ -0,0 +1,149 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for arm64 architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libpq5:arm64 \
        libpq-dev:arm64 \
        libmariadb3:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        gcc-aarch64-linux-gnu \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
|
||||
|
||||
######################## RUNTIME IMAGE ########################
|
||||
# Create a new stage with a minimal image
|
||||
# because we already have a binary built
|
||||
FROM balenalib/aarch64-debian:bullseye
|
||||
|
||||
ENV ROCKET_PROFILE="release" \
|
||||
ROCKET_ADDRESS=0.0.0.0 \
|
||||
ROCKET_PORT=80
|
||||
|
||||
# hadolint ignore=DL3059
|
||||
RUN [ "cross-build-start" ]
|
||||
|
||||
# Create data folder and Install needed libraries
|
||||
RUN mkdir /data \
|
||||
&& apt-get update && apt-get install -y \
|
||||
--no-install-recommends \
|
||||
openssl \
|
||||
ca-certificates \
|
||||
curl \
|
||||
libmariadb-dev-compat \
|
||||
libpq5 \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# hadolint ignore=DL3059
|
||||
RUN [ "cross-build-end" ]
|
||||
|
||||
VOLUME /data
|
||||
EXPOSE 80
|
||||
EXPOSE 3012
|
||||
|
||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||
# and the binary from the "build" stage to the current stage
|
||||
WORKDIR /
|
||||
COPY --from=vault /web-vault ./web-vault
|
||||
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
|
||||
|
||||
COPY docker/healthcheck.sh /healthcheck.sh
|
||||
COPY docker/start.sh /start.sh
|
||||
|
||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||
|
||||
CMD ["/start.sh"]
|
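The `RUN --mount=type=cache,...` lines in this buildx variant only take effect when the file is built under BuildKit. A sketch of how such a file might be invoked (the platform flag, tag name, and reliance on registered binfmt/QEMU emulation are illustrative assumptions, not taken from this repo):

# Assumes docker buildx is available; cross-building linux/arm64 on an
# x86_64 host additionally assumes binfmt/QEMU emulation is registered.
docker buildx build \
    --platform linux/arm64 \
    -f docker/arm64/Dockerfile.buildx \
    -t vaultwarden:arm64-test \
    --load \
    .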
121 docker/arm64/Dockerfile.buildx.alpine Normal file
@@ -0,0 +1,121 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
docker/armv6/Dockerfile
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -14,50 +16,61 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2.20.4b
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.20.4b
-#     [vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05]
+#     $ docker pull vaultwarden/web-vault:v2022.12.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05
-#     [vaultwarden/web-vault:v2.20.4b]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+#     [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.53 as build
+FROM rust:1.66-bullseye as build
 
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
 #
 # Install required build libs for armel architecture.
-# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture armel \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture armel \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:armel \
         libc6-dev:armel \
         libpq5:armel \
-        libpq-dev \
+        libpq-dev:armel \
         libmariadb3:armel \
         libmariadb-dev:armel \
         libmariadb-dev-compat:armel \
         gcc-arm-linux-gnueabi \
-    && mkdir -p ~/.cargo \
-    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
-    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
 
+# Set arm specific environment values
+ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
+    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
 
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -68,27 +81,11 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
-# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
-    && apt-get download libmariadb-dev-compat:amd64 \
-    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
-    && rm -rvf ./libmariadb-dev-compat*.deb \
-    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-    # The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-    # Without this specific file the ld command will fail and compilation fails with it.
-    && ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
-
-ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
 RUN rustup target add arm-unknown-linux-gnueabi
 
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -104,16 +101,17 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/rpi-debian:buster
+FROM balenalib/rpi-debian:bullseye
 
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80
 
 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
@@ -125,11 +123,16 @@ RUN mkdir /data \
         openssl \
         ca-certificates \
         curl \
-        dumb-init \
         libmariadb-dev-compat \
        libpq5 \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
+# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
+# This symlink was there in the buster images, and for some reason this is needed.
+# hadolint ignore=DL3059
+RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
+
 # hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
@@ -140,7 +143,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .
 
@@ -149,6 +151,4 @@ COPY docker/start.sh /start.sh
 
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
123 docker/armv6/Dockerfile.alpine Normal file
@@ -0,0 +1,123 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
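The `RUSTFLAGS` line above hard-codes where the musl toolchain image ships its static libatomic, which the armv6 build needs once MiMalloc is enabled. A quick sanity check that the archive really sits at that path in the builder image (the path is copied from the ENV line above; running it this way is an illustrative assumption):

# The builder image is an x86_64 image carrying a cross musl toolchain,
# so this should run without emulation and list the static archive.
docker run --rm blackdex/rust-musl:arm-musleabi-stable-1.66.0 \
    ls -l /usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a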
154 docker/armv6/Dockerfile.buildx Normal file
@@ -0,0 +1,154 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev:armel \
        libmariadb3:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
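The `RUN ln -v -s ...` workaround above exists because the Bullseye-based Balena armv6 image dropped a loader symlink that the Buster images still had. One way to see the gap from outside the build (image tag taken from the FROM line; running an armv6 image on an x86_64 host assumes binfmt/QEMU emulation is registered):

# Lists the loader that exists and errors on the missing /lib/ld-linux.so.3,
# which is exactly what the symlink created in the Dockerfile provides.
docker run --rm balenalib/rpi-debian:bullseye \
    ls -l /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3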
123 docker/armv6/Dockerfile.buildx.alpine Normal file
@@ -0,0 +1,123 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
docker/armv7/Dockerfile
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -14,50 +16,61 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2.20.4b
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.20.4b
-#     [vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05]
+#     $ docker pull vaultwarden/web-vault:v2022.12.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05
-#     [vaultwarden/web-vault:v2.20.4b]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+#     [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.53 as build
+FROM rust:1.66-bullseye as build
 
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
 #
 # Install required build libs for armhf architecture.
-# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture armhf \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture armhf \
    && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:armhf \
         libc6-dev:armhf \
         libpq5:armhf \
-        libpq-dev \
+        libpq-dev:armhf \
         libmariadb3:armhf \
         libmariadb-dev:armhf \
         libmariadb-dev-compat:armhf \
         gcc-arm-linux-gnueabihf \
-    && mkdir -p ~/.cargo \
-    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
-    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"
 
+# Set arm specific environment values
+ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
+    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -68,27 +81,11 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
-# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
-    && apt-get download libmariadb-dev-compat:amd64 \
-    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
-    && rm -rvf ./libmariadb-dev-compat*.deb \
-    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-    # The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-    # Without this specific file the ld command will fail and compilation fails with it.
-    && ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
-
-ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 RUN rustup target add armv7-unknown-linux-gnueabihf
 
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -104,16 +101,17 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-debian:buster
+FROM balenalib/armv7hf-debian:bullseye
 
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80
 
 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
@@ -125,9 +123,9 @@ RUN mkdir /data \
         openssl \
         ca-certificates \
         curl \
-        dumb-init \
         libmariadb-dev-compat \
         libpq5 \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
+# hadolint ignore=DL3059
@@ -140,7 +138,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .
 
@@ -149,6 +146,4 @@ COPY docker/start.sh /start.sh
 
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
docker/armv7/Dockerfile.alpine
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -14,31 +16,34 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2.20.4b
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.20.4b
-#     [vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05]
+#     $ docker pull vaultwarden/web-vault:v2022.12.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05
-#     [vaultwarden/web-vault:v2.20.4b]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+#     [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:894e266d4491494dd5a8a736855a6772aa146fa14206853b11b41cf3f3f64d05 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
 
 ########################## BUILD IMAGE ##########################
-FROM messense/rust-musl-cross:armv7-musleabihf as build
+FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build
 
-# Alpine-based ARM (musl) only supports sqlite during compile time.
-ARG DB=sqlite
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
-
-ENV USER "root"
-ENV RUSTFLAGS='-C link-arg=-s'
-ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -51,6 +56,10 @@ COPY ./build.rs ./build.rs
 
 RUN rustup target add armv7-unknown-linux-musleabihf
 
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -66,19 +75,19 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
 # hadolint ignore=DL3059
-RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-alpine:3.14
+FROM balenalib/armv7hf-alpine:3.17
 
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs
 
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs
 
 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
@@ -87,8 +96,8 @@ RUN [ "cross-build-start" ]
 RUN mkdir /data \
     && apk add --no-cache \
         openssl \
+        tzdata \
         curl \
-        dumb-init \
         ca-certificates
 
 # hadolint ignore=DL3059
@@ -101,7 +110,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .
 
@@ -110,6 +118,4 @@ COPY docker/start.sh /start.sh
 
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
149 docker/armv7/Dockerfile.buildx Normal file
@@ -0,0 +1,149 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armhf architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev:armhf \
        libmariadb3:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
121
docker/armv7/Dockerfile.buildx.alpine
Normal file
121
docker/armv7/Dockerfile.buildx.alpine
Normal file
@@ -0,0 +1,121 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
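Why the `RUN touch src/main.rs` step matters (background note, not part of the generated file): cargo decides what to rebuild from file modification times, and the dependency-caching stage compiled the stub src/main.rs created by `cargo new`. Touching the file after `COPY . .` bumps its mtime so the final `cargo build` compiles the real sources instead of reusing the stub binary.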
@@ -2,8 +2,8 @@

 # Use the value of the corresponding env var (if present),
 # or a default value otherwise.
-: ${DATA_FOLDER:="data"}
-: ${ROCKET_PORT:="80"}
+: "${DATA_FOLDER:="data"}"
+: "${ROCKET_PORT:="80"}"

 CONFIG_FILE="${DATA_FOLDER}"/config.json
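The quoting added above hardens the default-assignment idiom against word splitting and globbing (ShellCheck SC2223). A minimal illustration of the failure mode, using a hypothetical variable:

    OPT='*'
    : ${OPT:="fallback"}     # unquoted: the expanded `*` is subject to globbing
    : "${OPT:="fallback"}"   # quoted: the value passes through untouched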
@@ -45,9 +45,13 @@ if [ -r "${CONFIG_FILE}" ]; then
     fi
 fi

+addr="${ROCKET_ADDRESS}"
+if [ -z "${addr}" ] || [ "${addr}" = '0.0.0.0' ] || [ "${addr}" = '::' ]; then
+    addr='localhost'
+fi
 base_path="$(get_base_path "${DOMAIN}")"
 if [ -n "${ROCKET_TLS}" ]; then
     s='s'
 fi
 curl --insecure --fail --silent --show-error \
-     "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1
+     "http${s}://${addr}:${ROCKET_PORT}${base_path}/alive" || exit 1
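Net effect of the hunk above: when ROCKET_ADDRESS is unset or a wildcard bind address (0.0.0.0 or ::), the probe still targets localhost; otherwise it uses the configured address. With hypothetical values ROCKET_ADDRESS=127.0.0.1, ROCKET_PORT=8080, no TLS, and an empty base path, the script would effectively run:

    curl --insecure --fail --silent --show-error \
         "http://127.0.0.1:8080/alive" || exit 1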
@@ -9,15 +9,15 @@ fi

 if [ -d /etc/vaultwarden.d ]; then
     for f in /etc/vaultwarden.d/*.sh; do
-        if [ -r $f ]; then
-            . $f
+        if [ -r "${f}" ]; then
+            . "${f}"
         fi
     done
 elif [ -d /etc/bitwarden_rs.d ]; then
     echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
     for f in /etc/bitwarden_rs.d/*.sh; do
-        if [ -r $f ]; then
-            . $f
+        if [ -r "${f}" ]; then
+            . "${f}"
         fi
     done
 fi
@@ -7,10 +7,5 @@ arches=(
 )

 if [[ "${DOCKER_TAG}" == *alpine ]]; then
-    # The Alpine image build currently only works for certain arches.
     distro_suffix=.alpine
-    arches=(
-        amd64
-        armv7
-    )
 fi
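Net effect of this hunk: the arch-list override for Alpine tags is removed, so Alpine images now build for the full arches array defined above rather than only amd64 and armv7.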
@@ -34,12 +34,17 @@ for label in "${LABELS[@]}"; do
     LABEL_ARGS+=(--label "${label}")
 done

+# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildx as template
+if [[ -n "${DOCKER_BUILDKIT}" ]]; then
+    buildx_suffix=.buildx
+fi
+
 set -ex

 for arch in "${arches[@]}"; do
     docker build \
         "${LABEL_ARGS[@]}" \
         -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-        -f docker/${arch}/Dockerfile${distro_suffix} \
+        -f docker/${arch}/Dockerfile${buildx_suffix}${distro_suffix} \
         .
 done
17
hooks/push
@@ -10,7 +10,7 @@ join() { local IFS="$1"; shift; echo "$*"; }

 set -ex

-echo ">>> Starting local Docker registry..."
+echo ">>> Starting local Docker registry when needed..."

 # Docker Buildx's `docker-container` driver is needed for multi-platform
 # builds, but it can't access existing images on the Docker host (like the
@@ -25,7 +25,13 @@ echo ">>> Starting local Docker registry..."
 # Use host networking so the buildx container can access the registry via
 # localhost.
 #
-docker run -d --name registry --network host registry:2 # defaults to port 5000
+# First check if there already is a registry container running, else skip it.
+# This will only happen either locally or running it via Github Actions
+#
+if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
+    # defaults to port 5000
+    docker run -d --name registry --network host registry:2
+fi

 # Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
 # Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
@@ -49,7 +55,12 @@ echo ">>> Setting up Docker Buildx..."
 #
 # Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
 #
-docker buildx create --name builder --use --driver-opt network=host
+# Check if there already is a builder running, else skip this and use the existing.
+# This will only happen either locally or running it via Github Actions
+#
+if ! docker buildx inspect builder > /dev/null 2>&1 ; then
+    docker buildx create --name builder --use --driver-opt network=host
+fi

 echo ">>> Running Docker Buildx..."
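Background on the port probe used above: bash treats /dev/tcp/HOST/PORT as a virtual path that opens a TCP connection when redirected to, so the guard succeeds only when something is already listening on localhost:5000, and `timeout 5` bounds the wait. The same idiom in isolation, with a hypothetical port:

    if timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
        echo "port 5000 is open"
    else
        echo "nothing listening on port 5000"
    fi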
@@ -0,0 +1,5 @@
ALTER TABLE organizations
ADD COLUMN private_key TEXT;

ALTER TABLE organizations
ADD COLUMN public_key TEXT;
@@ -0,0 +1 @@
DROP TABLE emergency_access;
@@ -0,0 +1,14 @@
CREATE TABLE emergency_access (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    grantor_uuid CHAR(36) REFERENCES users (uuid),
    grantee_uuid CHAR(36) REFERENCES users (uuid),
    email VARCHAR(255),
    key_encrypted TEXT,
    atype INTEGER NOT NULL,
    status INTEGER NOT NULL,
    wait_time_days INTEGER NOT NULL,
    recovery_initiated_at DATETIME,
    last_notification_at DATETIME,
    updated_at DATETIME NOT NULL,
    created_at DATETIME NOT NULL
);
@@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;
@@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
    user_uuid CHAR(36) NOT NULL REFERENCES users(uuid),
    device_uuid CHAR(36) NOT NULL,
    device_name TEXT NOT NULL,
    login_time DATETIME NOT NULL,
    ip_address TEXT NOT NULL,

    PRIMARY KEY (user_uuid, device_uuid)
);
2
migrations/mysql/2022-01-17-234911_add_api_key/up.sql
Normal file
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key VARCHAR(255);
@@ -0,0 +1,4 @@
-- First remove the previous primary key
ALTER TABLE devices DROP PRIMARY KEY;
-- Add a new combined one
ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
@@ -0,0 +1,3 @@
DROP TABLE `groups`;
DROP TABLE groups_users;
DROP TABLE collections_groups;
23
migrations/mysql/2022-07-27-110000_add_group_support/up.sql
Normal file
@@ -0,0 +1,23 @@
CREATE TABLE `groups` (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
    name VARCHAR(100) NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id VARCHAR(300) NULL,
    creation_date DATETIME NOT NULL,
    revision_date DATETIME NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid),
    users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
    UNIQUE (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
    groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    UNIQUE (collections_uuid, groups_uuid)
);
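Side note (an inference, not stated in the diff): `groups` is backtick-quoted only in the MySQL migration because GROUPS is a reserved keyword in MySQL 8.0; the PostgreSQL and SQLite variants further down use the bare name.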
1
migrations/mysql/2022-10-18-170602_add_events/down.sql
Normal file
@@ -0,0 +1 @@
DROP TABLE event;
19
migrations/mysql/2022-10-18-170602_add_events/up.sql
Normal file
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid CHAR(36),
    org_uuid CHAR(36),
    cipher_uuid CHAR(36),
    collection_uuid CHAR(36),
    group_uuid CHAR(36),
    org_user_uuid CHAR(36),
    act_user_uuid CHAR(36),
    device_type INTEGER,
    ip_address TEXT,
    event_date DATETIME NOT NULL,
    policy_uuid CHAR(36),
    provider_uuid CHAR(36),
    provider_user_uuid CHAR(36),
    provider_org_uuid CHAR(36),
    UNIQUE (uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE organizations
ADD COLUMN private_key TEXT;

ALTER TABLE organizations
ADD COLUMN public_key TEXT;
@@ -0,0 +1 @@
DROP TABLE emergency_access;
@@ -0,0 +1,14 @@
CREATE TABLE emergency_access (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    grantor_uuid CHAR(36) REFERENCES users (uuid),
    grantee_uuid CHAR(36) REFERENCES users (uuid),
    email VARCHAR(255),
    key_encrypted TEXT,
    atype INTEGER NOT NULL,
    status INTEGER NOT NULL,
    wait_time_days INTEGER NOT NULL,
    recovery_initiated_at TIMESTAMP,
    last_notification_at TIMESTAMP,
    updated_at TIMESTAMP NOT NULL,
    created_at TIMESTAMP NOT NULL
);
@@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;
@@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
    user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid),
    device_uuid VARCHAR(40) NOT NULL,
    device_name TEXT NOT NULL,
    login_time TIMESTAMP NOT NULL,
    ip_address TEXT NOT NULL,

    PRIMARY KEY (user_uuid, device_uuid)
);
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key TEXT;
@@ -0,0 +1,4 @@
-- First remove the previous primary key
ALTER TABLE devices DROP CONSTRAINT devices_pkey;
-- Add a new combined one
ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
@@ -0,0 +1,3 @@
DROP TABLE groups;
DROP TABLE groups_users;
DROP TABLE collections_groups;
@@ -0,0 +1,23 @@
CREATE TABLE groups (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
    name VARCHAR(100) NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id VARCHAR(300) NULL,
    creation_date TIMESTAMP NOT NULL,
    revision_date TIMESTAMP NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid CHAR(36) NOT NULL REFERENCES groups (uuid),
    users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
    PRIMARY KEY (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
    groups_uuid CHAR(36) NOT NULL REFERENCES groups (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    PRIMARY KEY (collections_uuid, groups_uuid)
);
@@ -0,0 +1 @@
DROP TABLE event;
19
migrations/postgresql/2022-10-18-170602_add_events/up.sql
Normal file
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid CHAR(36),
    org_uuid CHAR(36),
    cipher_uuid CHAR(36),
    collection_uuid CHAR(36),
    group_uuid CHAR(36),
    org_user_uuid CHAR(36),
    act_user_uuid CHAR(36),
    device_type INTEGER,
    ip_address TEXT,
    event_date TIMESTAMP NOT NULL,
    policy_uuid CHAR(36),
    provider_uuid CHAR(36),
    provider_user_uuid CHAR(36),
    provider_org_uuid CHAR(36),
    UNIQUE (uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE organizations
ADD COLUMN private_key TEXT;

ALTER TABLE organizations
ADD COLUMN public_key TEXT;
@@ -0,0 +1 @@
DROP TABLE emergency_access;
@@ -0,0 +1,14 @@
CREATE TABLE emergency_access (
    uuid TEXT NOT NULL PRIMARY KEY,
    grantor_uuid TEXT REFERENCES users (uuid),
    grantee_uuid TEXT REFERENCES users (uuid),
    email TEXT,
    key_encrypted TEXT,
    atype INTEGER NOT NULL,
    status INTEGER NOT NULL,
    wait_time_days INTEGER NOT NULL,
    recovery_initiated_at DATETIME,
    last_notification_at DATETIME,
    updated_at DATETIME NOT NULL,
    created_at DATETIME NOT NULL
);
@@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;
@@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
    user_uuid TEXT NOT NULL REFERENCES users(uuid),
    device_uuid TEXT NOT NULL,
    device_name TEXT NOT NULL,
    login_time DATETIME NOT NULL,
    ip_address TEXT NOT NULL,

    PRIMARY KEY (user_uuid, device_uuid)
);
2
migrations/sqlite/2022-01-17-234911_add_api_key/up.sql
Normal file
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key TEXT;
@@ -0,0 +1,23 @@
-- Create new devices table with primary keys on both uuid and user_uuid
CREATE TABLE devices_new (
    uuid TEXT NOT NULL,
    created_at DATETIME NOT NULL,
    updated_at DATETIME NOT NULL,
    user_uuid TEXT NOT NULL,
    name TEXT NOT NULL,
    atype INTEGER NOT NULL,
    push_token TEXT,
    refresh_token TEXT NOT NULL,
    twofactor_remember TEXT,
    PRIMARY KEY(uuid, user_uuid),
    FOREIGN KEY(user_uuid) REFERENCES users(uuid)
);

-- Transfer current data to new table
INSERT INTO devices_new SELECT * FROM devices;

-- Drop the old table
DROP TABLE devices;

-- Rename the new table to the original name
ALTER TABLE devices_new RENAME TO devices;
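Background note (not part of the migration): unlike the MySQL and PostgreSQL variants, which drop and re-add the primary key directly, SQLite's ALTER TABLE cannot change a primary key in place, hence the create-copy-drop-rename sequence above.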
@@ -0,0 +1,3 @@
DROP TABLE groups;
DROP TABLE groups_users;
DROP TABLE collections_groups;
23
migrations/sqlite/2022-07-27-110000_add_group_support/up.sql
Normal file
@@ -0,0 +1,23 @@
CREATE TABLE groups (
    uuid TEXT NOT NULL PRIMARY KEY,
    organizations_uuid TEXT NOT NULL REFERENCES organizations (uuid),
    name TEXT NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id TEXT NULL,
    creation_date TIMESTAMP NOT NULL,
    revision_date TIMESTAMP NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid TEXT NOT NULL REFERENCES groups (uuid),
    users_organizations_uuid TEXT NOT NULL REFERENCES users_organizations (uuid),
    UNIQUE (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid TEXT NOT NULL REFERENCES collections (uuid),
    groups_uuid TEXT NOT NULL REFERENCES groups (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    UNIQUE (collections_uuid, groups_uuid)
);
1
migrations/sqlite/2022-10-18-170602_add_events/down.sql
Normal file
@@ -0,0 +1 @@
DROP TABLE event;
19
migrations/sqlite/2022-10-18-170602_add_events/up.sql
Normal file
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid TEXT NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid TEXT,
    org_uuid TEXT,
    cipher_uuid TEXT,
    collection_uuid TEXT,
    group_uuid TEXT,
    org_user_uuid TEXT,
    act_user_uuid TEXT,
    device_type INTEGER,
    ip_address TEXT,
    event_date DATETIME NOT NULL,
    policy_uuid TEXT,
    provider_uuid TEXT,
    provider_user_uuid TEXT,
    provider_org_uuid TEXT,
    UNIQUE (uuid)
);
93
resources/404.svg
Normal file
@@ -0,0 +1,93 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="500"
   height="222"
   viewBox="0 0 500 222"
   version="1.1"
   id="svg5"
   xml:space="preserve"
   inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)"
   sodipodi:docname="404.svg"
   inkscape:export-filename="404.png"
   inkscape:export-xdpi="96"
   inkscape:export-ydpi="96"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns:xlink="http://www.w3.org/1999/xlink"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
     id="namedview7"
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1.0"
     inkscape:showpageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     inkscape:deskcolor="#d1d1d1"
     inkscape:document-units="px"
     showgrid="false"
     inkscape:zoom="1.3791767"
     inkscape:cx="284.59007"
     inkscape:cy="214.25826"
     inkscape:window-width="1916"
     inkscape:window-height="1038"
     inkscape:window-x="0"
     inkscape:window-y="18"
     inkscape:window-maximized="1"
     inkscape:current-layer="layer1"
     showguides="false" /><defs
     id="defs2"><mask
       id="holes"><rect
         x="-60"
         y="-60"
         width="120"
         height="120"
         fill="#ffffff"
         id="rect3296" /><circle
         id="hole"
         cy="-40"
         r="3"
         cx="0" /><use
         transform="rotate(72)"
         xlink:href="#hole"
         id="use3299" /><use
         transform="rotate(144)"
         xlink:href="#hole"
         id="use3301" /><use
         transform="rotate(-144)"
         xlink:href="#hole"
         id="use3303" /><use
         transform="rotate(-72)"
         xlink:href="#hole"
         id="use3305" /></mask></defs><g
     inkscape:label="Ebene 1"
     inkscape:groupmode="layer"
     id="layer1"><rect
       style="fill:none;fill-opacity:0.5;stroke:none;stroke-width:0.74;stroke-opacity:1"
       id="rect681"
       width="666"
       height="222"
       x="0"
       y="0" /><text
       xml:space="preserve"
       style="font-size:128px;line-height:1.25;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7;stroke-width:1"
       x="249.9375"
       y="134.8125"
       id="text3425"><tspan
         id="tspan3423"
         x="249.9375"
         y="134.8125"
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:128px;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7;stroke-width:1"
         sodipodi:role="line">404</tspan></text><text
       xml:space="preserve"
       style="font-size:26.6667px;line-height:1.25;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle"
       x="249.04297"
       y="194.68582"
       id="text4067"><tspan
         sodipodi:role="line"
         id="tspan4065"
         x="249.04295"
         y="194.68582"
         style="font-size:26.6667px;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7">Return to the web vault?</tspan></text></g></svg>
78
resources/vaultwarden-icon-white.svg
Normal file
@@ -0,0 +1,78 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg version="1.1" viewBox="0 0 256 256" id="svg3917" sodipodi:docname="vaultwarden-icon-white.svg" inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)" width="256" height="256" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <defs id="defs3921" />
  <sodipodi:namedview id="namedview3919" pagecolor="#000000" bordercolor="#666666" borderopacity="1.0" inkscape:showpageshadow="2" inkscape:pageopacity="0" inkscape:pagecheckerboard="0" inkscape:deskcolor="#202020" showgrid="false" inkscape:zoom="3.3359375" inkscape:cx="128" inkscape:cy="128" inkscape:window-width="1874" inkscape:window-height="1056" inkscape:window-x="46" inkscape:window-y="24" inkscape:window-maximized="1" inkscape:current-layer="svg3917" />
  <title id="title3817">Vaultwarden Icon - White</title>
  <g id="logo" transform="matrix(2.4381018,0,0,2.4381018,128,128)">
    <g id="gear" mask="url(#holes)" stroke="#fff">
      <path d="m-31.1718-33.813208 26.496029 74.188883h9.3515399l26.49603-74.188883h-9.767164l-16.728866 47.588948q-1.662496 4.571864-2.805462 8.624198-1.142966 3.948427-1.870308 7.585137-.72734199-3.63671-1.8703079-7.689043-1.142966-4.052334-2.805462-8.728104l-16.624959-47.381136z" fill="#fff" stroke-width="4.51171" id="path3819" />
      <circle transform="scale(-1,1)" r="43" fill="none" stroke-width="9" id="circle3821" />
      <g id="cogs" transform="scale(-1,1)">
        <polygon id="cog" points="51 0 46 -3 46 3" fill="#fff" stroke="#fff" stroke-linejoin="round" stroke-width="3" />
        <g fill="#fff" stroke="#fff" id="g3886">
          <use transform="rotate(11.25)" xlink:href="#cog" id="use3824" />
          <use transform="rotate(22.5)" xlink:href="#cog" id="use3826" />
          <use transform="rotate(33.75)" xlink:href="#cog" id="use3828" />
          <use transform="rotate(45)" xlink:href="#cog" id="use3830" />
          <use transform="rotate(56.25)" xlink:href="#cog" id="use3832" />
          <use transform="rotate(67.5)" xlink:href="#cog" id="use3834" />
          <use transform="rotate(78.75)" xlink:href="#cog" id="use3836" />
          <use transform="rotate(90)" xlink:href="#cog" id="use3838" />
          <use transform="rotate(101.25)" xlink:href="#cog" id="use3840" />
          <use transform="rotate(112.5)" xlink:href="#cog" id="use3842" />
          <use transform="rotate(123.75)" xlink:href="#cog" id="use3844" />
          <use transform="rotate(135)" xlink:href="#cog" id="use3846" />
          <use transform="rotate(146.25)" xlink:href="#cog" id="use3848" />
          <use transform="rotate(157.5)" xlink:href="#cog" id="use3850" />
          <use transform="rotate(168.75)" xlink:href="#cog" id="use3852" />
          <use transform="scale(-1)" xlink:href="#cog" id="use3854" />
          <use transform="rotate(191.25)" xlink:href="#cog" id="use3856" />
          <use transform="rotate(202.5)" xlink:href="#cog" id="use3858" />
          <use transform="rotate(213.75)" xlink:href="#cog" id="use3860" />
          <use transform="rotate(225)" xlink:href="#cog" id="use3862" />
          <use transform="rotate(236.25)" xlink:href="#cog" id="use3864" />
          <use transform="rotate(247.5)" xlink:href="#cog" id="use3866" />
          <use transform="rotate(258.75)" xlink:href="#cog" id="use3868" />
          <use transform="rotate(-90)" xlink:href="#cog" id="use3870" />
          <use transform="rotate(-78.75)" xlink:href="#cog" id="use3872" />
          <use transform="rotate(-67.5)" xlink:href="#cog" id="use3874" />
          <use transform="rotate(-56.25)" xlink:href="#cog" id="use3876" />
          <use transform="rotate(-45)" xlink:href="#cog" id="use3878" />
          <use transform="rotate(-33.75)" xlink:href="#cog" id="use3880" />
          <use transform="rotate(-22.5)" xlink:href="#cog" id="use3882" />
          <use transform="rotate(-11.25)" xlink:href="#cog" id="use3884" />
        </g>
      </g>
      <g id="mounts" transform="scale(-1,1)">
        <polygon id="mount" points="0 -35 7 -42 -7 -42" fill="#fff" stroke="#fff" stroke-linejoin="round" stroke-width="6" />
        <g fill="#fff" stroke="#fff" id="g3898">
          <use transform="rotate(72)" xlink:href="#mount" id="use3890" />
          <use transform="rotate(144)" xlink:href="#mount" id="use3892" />
          <use transform="rotate(216)" xlink:href="#mount" id="use3894" />
          <use transform="rotate(-72)" xlink:href="#mount" id="use3896" />
        </g>
      </g>
    </g>
    <mask id="holes">
      <rect x="-60" y="-60" width="120" height="120" fill="#fff" id="rect3902" />
      <circle id="hole" cy="-40" r="3" />
      <use transform="rotate(72)" xlink:href="#hole" id="use3905" />
      <use transform="rotate(144)" xlink:href="#hole" id="use3907" />
      <use transform="rotate(216)" xlink:href="#hole" id="use3909" />
      <use transform="rotate(-72)" xlink:href="#hole" id="use3911" />
    </mask>
  </g>
  <metadata id="metadata3915">
    <rdf:RDF>
      <cc:Work rdf:about="">
        <dc:title>Vaultwarden Icon - White</dc:title>
        <dc:creator>
          <cc:Agent>
            <dc:title>Mathijs van Veluw</dc:title>
          </cc:Agent>
        </dc:creator>
        <dc:relation>Rust Logo</dc:relation>
      </cc:Work>
    </rdf:RDF>
  </metadata>
</svg>
74
resources/vaultwarden-icon.svg
Normal file
@@ -0,0 +1,74 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg version="1.1" viewBox="0 0 256 256" id="svg383" sodipodi:docname="vaultwarden-icon.svg" inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)" width="256" height="256" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <defs id="defs387" />
  <sodipodi:namedview id="namedview385" pagecolor="#ffffff" bordercolor="#666666" borderopacity="1.0" inkscape:showpageshadow="2" inkscape:pageopacity="0.0" inkscape:pagecheckerboard="0" inkscape:deskcolor="#d1d1d1" showgrid="false" inkscape:zoom="3.3359375" inkscape:cx="128" inkscape:cy="128" inkscape:window-width="1874" inkscape:window-height="1056" inkscape:window-x="46" inkscape:window-y="24" inkscape:window-maximized="1" inkscape:current-layer="svg383" />
  <title id="title287">Vaultwarden Icon</title>
  <g id="logo" transform="matrix(2.4381018,0,0,2.4381018,128,128)">
    <g id="gear" mask="url(#holes)">
      <path d="m-31.1718-33.813208 26.496029 74.188883h9.3515399l26.49603-74.188883h-9.767164l-16.728866 47.588948q-1.662496 4.571864-2.805462 8.624198-1.142966 3.948427-1.870308 7.585137-.72734199-3.63671-1.8703079-7.689043-1.142966-4.052334-2.805462-8.728104l-16.624959-47.381136z" stroke="#000" stroke-width="4.51171" id="path289" />
      <circle transform="scale(-1,1)" r="43" fill="none" stroke="#000" stroke-width="9" id="circle291" />
      <g id="cogs" transform="scale(-1,1)">
        <polygon id="cog" points="51 0 46 -3 46 3" stroke="#000" stroke-linejoin="round" stroke-width="3" />
        <use transform="rotate(11.25)" xlink:href="#cog" id="use294" />
        <use transform="rotate(22.5)" xlink:href="#cog" id="use296" />
        <use transform="rotate(33.75)" xlink:href="#cog" id="use298" />
        <use transform="rotate(45)" xlink:href="#cog" id="use300" />
        <use transform="rotate(56.25)" xlink:href="#cog" id="use302" />
        <use transform="rotate(67.5)" xlink:href="#cog" id="use304" />
        <use transform="rotate(78.75)" xlink:href="#cog" id="use306" />
        <use transform="rotate(90)" xlink:href="#cog" id="use308" />
        <use transform="rotate(101.25)" xlink:href="#cog" id="use310" />
        <use transform="rotate(112.5)" xlink:href="#cog" id="use312" />
        <use transform="rotate(123.75)" xlink:href="#cog" id="use314" />
        <use transform="rotate(135)" xlink:href="#cog" id="use316" />
        <use transform="rotate(146.25)" xlink:href="#cog" id="use318" />
        <use transform="rotate(157.5)" xlink:href="#cog" id="use320" />
        <use transform="rotate(168.75)" xlink:href="#cog" id="use322" />
        <use transform="scale(-1)" xlink:href="#cog" id="use324" />
        <use transform="rotate(191.25)" xlink:href="#cog" id="use326" />
        <use transform="rotate(202.5)" xlink:href="#cog" id="use328" />
        <use transform="rotate(213.75)" xlink:href="#cog" id="use330" />
        <use transform="rotate(225)" xlink:href="#cog" id="use332" />
        <use transform="rotate(236.25)" xlink:href="#cog" id="use334" />
        <use transform="rotate(247.5)" xlink:href="#cog" id="use336" />
        <use transform="rotate(258.75)" xlink:href="#cog" id="use338" />
        <use transform="rotate(-90)" xlink:href="#cog" id="use340" />
        <use transform="rotate(-78.75)" xlink:href="#cog" id="use342" />
        <use transform="rotate(-67.5)" xlink:href="#cog" id="use344" />
        <use transform="rotate(-56.25)" xlink:href="#cog" id="use346" />
        <use transform="rotate(-45)" xlink:href="#cog" id="use348" />
        <use transform="rotate(-33.75)" xlink:href="#cog" id="use350" />
        <use transform="rotate(-22.5)" xlink:href="#cog" id="use352" />
        <use transform="rotate(-11.25)" xlink:href="#cog" id="use354" />
      </g>
      <g id="mounts" transform="scale(-1,1)">
        <polygon id="mount" points="0 -35 7 -42 -7 -42" stroke="#000" stroke-linejoin="round" stroke-width="6" />
        <use transform="rotate(72)" xlink:href="#mount" id="use358" />
        <use transform="rotate(144)" xlink:href="#mount" id="use360" />
        <use transform="rotate(216)" xlink:href="#mount" id="use362" />
        <use transform="rotate(-72)" xlink:href="#mount" id="use364" />
      </g>
    </g>
    <mask id="holes">
      <rect x="-60" y="-60" width="120" height="120" fill="#fff" id="rect368" />
      <circle id="hole" cy="-40" r="3" />
      <use transform="rotate(72)" xlink:href="#hole" id="use371" />
      <use transform="rotate(144)" xlink:href="#hole" id="use373" />
      <use transform="rotate(216)" xlink:href="#hole" id="use375" />
      <use transform="rotate(-72)" xlink:href="#hole" id="use377" />
    </mask>
  </g>
  <metadata id="metadata381">
    <rdf:RDF>
      <cc:Work rdf:about="">
        <dc:title>Vaultwarden Icon</dc:title>
        <dc:creator>
          <cc:Agent>
            <dc:title>Mathijs van Veluw</dc:title>
          </cc:Agent>
        </dc:creator>
        <dc:relation>Rust Logo</dc:relation>
      </cc:Work>
    </rdf:RDF>
  </metadata>
</svg>
88
resources/vaultwarden-logo-white.svg
Normal file
@@ -0,0 +1,88 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="1365.8256" height="280.48944" version="1.1" viewBox="0 0 1365.8255 280.48944" id="svg3412" sodipodi:docname="vaultwarden-logo.svg" inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <sodipodi:namedview id="namedview3414" pagecolor="#000000" bordercolor="#666666" borderopacity="1.0" inkscape:showpageshadow="2" inkscape:pageopacity="0.0" inkscape:pagecheckerboard="0" inkscape:deskcolor="#202020" showgrid="false" inkscape:zoom="0.95107314" inkscape:cx="683.4385" inkscape:cy="139.84203" inkscape:window-width="1874" inkscape:window-height="1056" inkscape:window-x="46" inkscape:window-y="24" inkscape:window-maximized="1" inkscape:current-layer="svg3412" />
  <title id="title3292">Vaultwarden Logo - White</title>
  <defs id="defs3308">
    <mask id="holes">
      <rect x="-60" y="-60" width="120" height="120" fill="#fff" id="rect3296" />
      <circle id="hole" cy="-40" r="3" />
      <use transform="rotate(72)" xlink:href="#hole" id="use3299" />
      <use transform="rotate(144)" xlink:href="#hole" id="use3301" />
      <use transform="rotate(216)" xlink:href="#hole" id="use3303" />
      <use transform="rotate(-72)" xlink:href="#hole" id="use3305" />
    </mask>
  </defs>
  <text transform="translate(-10.708266,-9.2965379)" x="286.59244" y="223.43649" fill="#e6e6e6" font-family="'Open Sans'" font-size="200px" style="line-height:1.25" xml:space="preserve" id="text3314"><tspan x="286.59244" y="223.43649" font-family="'Open Sans'" font-size="200px" id="tspan3312"><tspan font-family="'Open Sans'" font-size="200px" font-weight="bold" id="tspan3310">ault</tspan>warden</tspan></text>
  <g transform="translate(-10.708266,-9.2965379)" id="g3410">
    <g id="logo" transform="matrix(2.6712834,0,0,2.6712834,150.95027,149.53854)">
      <g id="gear" mask="url(#holes)">
        <path d="m-31.1718-33.813208 26.496029 74.188883h9.3515399l26.49603-74.188883h-9.767164l-16.728866 47.588948q-1.662496 4.571864-2.805462 8.624198-1.142966 3.948427-1.870308 7.585137-.72734199-3.63671-1.8703079-7.689043-1.142966-4.052334-2.805462-8.728104l-16.624959-47.381136z" fill="#e6e6e6" stroke="#e6e6e6" stroke-width="4.51171" id="path3316" />
        <circle transform="scale(-1,1)" r="43" fill="none" stroke="#e6e6e6" stroke-width="9" id="circle3318" />
        <g id="cogs" transform="scale(-1,1)">
          <polygon id="cog" points="46 -3 46 3 51 0" fill="#e6e6e6" stroke="#e6e6e6" stroke-linejoin="round" stroke-width="3" />
          <use transform="rotate(11.25)" xlink:href="#cog" id="use3321" />
          <use transform="rotate(22.5)" xlink:href="#cog" id="use3323" />
          <use transform="rotate(33.75)" xlink:href="#cog" id="use3325" />
          <use transform="rotate(45)" xlink:href="#cog" id="use3327" />
          <use transform="rotate(56.25)" xlink:href="#cog" id="use3329" />
          <use transform="rotate(67.5)" xlink:href="#cog" id="use3331" />
          <use transform="rotate(78.75)" xlink:href="#cog" id="use3333" />
          <use transform="rotate(90)" xlink:href="#cog" id="use3335" />
          <use transform="rotate(101.25)" xlink:href="#cog" id="use3337" />
          <use transform="rotate(112.5)" xlink:href="#cog" id="use3339" />
          <use transform="rotate(123.75)" xlink:href="#cog" id="use3341" />
          <use transform="rotate(135)" xlink:href="#cog" id="use3343" />
          <use transform="rotate(146.25)" xlink:href="#cog" id="use3345" />
          <use transform="rotate(157.5)" xlink:href="#cog" id="use3347" />
          <use transform="rotate(168.75)" xlink:href="#cog" id="use3349" />
          <use transform="scale(-1)" xlink:href="#cog" id="use3351" />
          <use transform="rotate(191.25)" xlink:href="#cog" id="use3353" />
          <use transform="rotate(202.5)" xlink:href="#cog" id="use3355" />
          <use transform="rotate(213.75)" xlink:href="#cog" id="use3357" />
          <use transform="rotate(225)" xlink:href="#cog" id="use3359" />
          <use transform="rotate(236.25)" xlink:href="#cog" id="use3361" />
          <use transform="rotate(247.5)" xlink:href="#cog" id="use3363" />
          <use transform="rotate(258.75)" xlink:href="#cog" id="use3365" />
          <use transform="rotate(-90)" xlink:href="#cog" id="use3367" />
          <use transform="rotate(-78.75)" xlink:href="#cog" id="use3369" />
          <use transform="rotate(-67.5)" xlink:href="#cog" id="use3371" />
          <use transform="rotate(-56.25)" xlink:href="#cog" id="use3373" />
          <use transform="rotate(-45)" xlink:href="#cog" id="use3375" />
          <use transform="rotate(-33.75)" xlink:href="#cog" id="use3377" />
          <use transform="rotate(-22.5)" xlink:href="#cog" id="use3379" />
          <use transform="rotate(-11.25)" xlink:href="#cog" id="use3381" />
        </g>
        <g id="mounts" transform="scale(-1,1)">
          <polygon id="mount" points="7 -42 -7 -42 0 -35" stroke="#e6e6e6" stroke-linejoin="round" stroke-width="6" />
          <use transform="rotate(72)" xlink:href="#mount" id="use3385" />
          <use transform="rotate(144)" xlink:href="#mount" id="use3387" />
          <use transform="rotate(216)" xlink:href="#mount" id="use3389" />
          <use transform="rotate(-72)" xlink:href="#mount" id="use3391" />
        </g>
      </g>
      <mask id="mask3407">
        <rect x="-60" y="-60" width="120" height="120" fill="#e6e6e6" id="rect3395" />
        <circle cy="-40" r="3" id="circle3397" />
        <use transform="rotate(72)" xlink:href="#hole" id="use3399" />
        <use transform="rotate(144)" xlink:href="#hole" id="use3401" />
        <use transform="rotate(216)" xlink:href="#hole" id="use3403" />
        <use transform="rotate(-72)" xlink:href="#hole" id="use3405" />
      </mask>
    </g>
  </g>
  <metadata id="metadata3294">
    <rdf:RDF>
      <cc:Work rdf:about="">
        <dc:format>image/svg+xml</dc:format>
        <dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
        <dc:title>Vaultwarden Logo - White</dc:title>
        <dc:creator>
          <cc:Agent>
            <dc:title>Mathijs van Veluw</dc:title>
          </cc:Agent>
        </dc:creator>
        <dc:relation>Rust Logo</dc:relation>
      </cc:Work>
    </rdf:RDF>
  </metadata>
</svg>
88
resources/vaultwarden-logo.svg
Normal file
@@ -0,0 +1,88 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="1365.8256" height="280.48944" version="1.1" viewBox="0 0 1365.8255 280.48944" id="svg3412" sodipodi:docname="vaultwarden-logo.svg" inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <sodipodi:namedview id="namedview3414" pagecolor="#ffffff" bordercolor="#666666" borderopacity="1.0" inkscape:showpageshadow="2" inkscape:pageopacity="0.0" inkscape:pagecheckerboard="0" inkscape:deskcolor="#d1d1d1" showgrid="false" inkscape:zoom="0.95107314" inkscape:cx="683.4385" inkscape:cy="139.84203" inkscape:window-width="1874" inkscape:window-height="1056" inkscape:window-x="46" inkscape:window-y="24" inkscape:window-maximized="1" inkscape:current-layer="svg3412" />
  <title id="title3292">Vaultwarden Logo</title>
  <defs id="defs3308">
    <mask id="holes">
      <rect x="-60" y="-60" width="120" height="120" fill="#fff" id="rect3296" />
      <circle id="hole" cy="-40" r="3" />
      <use transform="rotate(72)" xlink:href="#hole" id="use3299" />
      <use transform="rotate(144)" xlink:href="#hole" id="use3301" />
      <use transform="rotate(216)" xlink:href="#hole" id="use3303" />
      <use transform="rotate(-72)" xlink:href="#hole" id="use3305" />
    </mask>
  </defs>
  <text transform="translate(-10.708266,-9.2965379)" x="286.59244" y="223.43649" fill="#000000" font-family="'Open Sans'" font-size="200px" style="line-height:1.25" xml:space="preserve" id="text3314"><tspan x="286.59244" y="223.43649" font-family="'Open Sans'" font-size="200px" id="tspan3312"><tspan font-family="'Open Sans'" font-size="200px" font-weight="bold" id="tspan3310">ault</tspan>warden</tspan></text>
  <g transform="translate(-10.708266,-9.2965379)" id="g3410">
    <g id="logo" transform="matrix(2.6712834,0,0,2.6712834,150.95027,149.53854)">
      <g id="gear" mask="url(#holes)">
        <path d="m-31.1718-33.813208 26.496029 74.188883h9.3515399l26.49603-74.188883h-9.767164l-16.728866 47.588948q-1.662496 4.571864-2.805462 8.624198-1.142966 3.948427-1.870308 7.585137-.72734199-3.63671-1.8703079-7.689043-1.142966-4.052334-2.805462-8.728104l-16.624959-47.381136z" stroke="#000" stroke-width="4.51171" id="path3316" />
        <circle transform="scale(-1,1)" r="43" fill="none" stroke="#000" stroke-width="9" id="circle3318" />
        <g id="cogs" transform="scale(-1,1)">
          <polygon id="cog" points="46 -3 46 3 51 0" stroke="#000" stroke-linejoin="round" stroke-width="3" />
          <use transform="rotate(11.25)" xlink:href="#cog" id="use3321" />
          <use transform="rotate(22.5)" xlink:href="#cog" id="use3323" />
          <use transform="rotate(33.75)" xlink:href="#cog" id="use3325" />
          <use transform="rotate(45)" xlink:href="#cog" id="use3327" />
          <use transform="rotate(56.25)" xlink:href="#cog" id="use3329" />
          <use transform="rotate(67.5)" xlink:href="#cog" id="use3331" />
          <use transform="rotate(78.75)" xlink:href="#cog" id="use3333" />
          <use transform="rotate(90)" xlink:href="#cog" id="use3335" />
          <use transform="rotate(101.25)" xlink:href="#cog" id="use3337" />
          <use transform="rotate(112.5)" xlink:href="#cog" id="use3339" />
          <use transform="rotate(123.75)" xlink:href="#cog" id="use3341" />
          <use transform="rotate(135)" xlink:href="#cog" id="use3343" />
          <use transform="rotate(146.25)" xlink:href="#cog" id="use3345" />
          <use transform="rotate(157.5)" xlink:href="#cog" id="use3347" />
          <use transform="rotate(168.75)" xlink:href="#cog" id="use3349" />
          <use transform="scale(-1)" xlink:href="#cog" id="use3351" />
          <use transform="rotate(191.25)" xlink:href="#cog" id="use3353" />
          <use transform="rotate(202.5)" xlink:href="#cog" id="use3355" />
          <use transform="rotate(213.75)" xlink:href="#cog" id="use3357" />
          <use transform="rotate(225)" xlink:href="#cog" id="use3359" />
          <use transform="rotate(236.25)" xlink:href="#cog" id="use3361" />
          <use transform="rotate(247.5)" xlink:href="#cog" id="use3363" />
          <use transform="rotate(258.75)" xlink:href="#cog" id="use3365" />
          <use transform="rotate(-90)" xlink:href="#cog" id="use3367" />
          <use transform="rotate(-78.75)" xlink:href="#cog" id="use3369" />
          <use transform="rotate(-67.5)" xlink:href="#cog" id="use3371" />
          <use transform="rotate(-56.25)" xlink:href="#cog" id="use3373" />
          <use transform="rotate(-45)" xlink:href="#cog" id="use3375" />
          <use transform="rotate(-33.75)" xlink:href="#cog" id="use3377" />
          <use transform="rotate(-22.5)" xlink:href="#cog" id="use3379" />
          <use transform="rotate(-11.25)" xlink:href="#cog" id="use3381" />
        </g>
        <g id="mounts" transform="scale(-1,1)">
          <polygon id="mount" points="7 -42 -7 -42 0 -35" stroke="#000" stroke-linejoin="round" stroke-width="6" />
          <use transform="rotate(72)" xlink:href="#mount" id="use3385" />
          <use transform="rotate(144)" xlink:href="#mount" id="use3387" />
          <use transform="rotate(216)" xlink:href="#mount" id="use3389" />
          <use transform="rotate(-72)" xlink:href="#mount" id="use3391" />
        </g>
      </g>
      <mask id="mask3407">
        <rect x="-60" y="-60" width="120" height="120" fill="#fff" id="rect3395" />
        <circle cy="-40" r="3" id="circle3397" />
        <use transform="rotate(72)" xlink:href="#hole" id="use3399" />
        <use transform="rotate(144)" xlink:href="#hole" id="use3401" />
        <use transform="rotate(216)" xlink:href="#hole" id="use3403" />
        <use transform="rotate(-72)" xlink:href="#hole" id="use3405" />
      </mask>
    </g>
  </g>
  <metadata id="metadata3294">
    <rdf:RDF>
      <cc:Work rdf:about="">
        <dc:format>image/svg+xml</dc:format>
        <dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
        <dc:title>Vaultwarden Logo</dc:title>
        <dc:creator>
          <cc:Agent>
            <dc:title>Mathijs van Veluw</dc:title>
          </cc:Agent>
        </dc:creator>
        <dc:relation>Rust Logo</dc:relation>
      </cc:Work>
    </rdf:RDF>
  </metadata>
</svg>
@@ -1 +1 @@
-nightly-2021-06-24
+1.66.0
@@ -1,7 +1,7 @@
-version = "Two"
-edition = "2018"
+# version = "Two"
+edition = "2021"
 max_width = 120
 newline_style = "Unix"
 use_small_heuristics = "Off"
-struct_lit_single_line = false
-overflow_delimited_expr = true
+# struct_lit_single_line = false
+# overflow_delimited_expr = true
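Context for the two changes above (inferred from the diff): the toolchain moves from nightly-2021-06-24 to stable 1.66.0, so rustfmt options that are still nightly-only (version = "Two", struct_lit_single_line, overflow_delimited_expr) are commented out, and the edition is bumped to 2021.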
482
src/api/admin.rs
@@ -1,25 +1,28 @@
 use once_cell::sync::Lazy;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::{env, time::Duration};
+use std::env;

+use rocket::serde::json::Json;
 use rocket::{
-    http::{Cookie, Cookies, SameSite, Status},
-    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
-    response::{content::Html, Flash, Redirect},
-    Route,
+    form::Form,
+    http::{Cookie, CookieJar, MediaType, SameSite, Status},
+    request::{FromRequest, Outcome, Request},
+    response::{content::RawHtml as Html, Redirect},
+    Catcher, Route,
 };
-use rocket_contrib::json::Json;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
+    api::{core::log_event, ApiResult, EmptyResult, JsonResult, NumberOrString},
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     error::{Error, MapResult},
     mail,
-    util::{format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker},
-    CONFIG,
+    util::{
+        docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
+    },
+    CONFIG, VERSION,
 };

 pub fn routes() -> Vec<Route> {
@@ -28,7 +31,6 @@ pub fn routes() -> Vec<Route> {
     }

     routes![
-        admin_login,
         get_users_json,
         get_user_json,
         post_admin_login,
@@ -54,6 +56,14 @@ pub fn routes() -> Vec<Route> {
     ]
 }

+pub fn catchers() -> Vec<Catcher> {
+    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
+        catchers![]
+    } else {
+        catchers![admin_login]
+    }
+}
+
 static DB_TYPE: Lazy<&str> = Lazy::new(|| {
     DbConnType::from_url(&CONFIG.database_url())
         .map(|t| match t {
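The new catchers() function registers admin_login as an error catcher instead of a plain route. A minimal sketch of how routes and catchers are typically wired together in Rocket 0.5 (illustrative only; vaultwarden's actual launch code is not part of this hunk):

    use rocket::{Build, Rocket};

    // Hypothetical mounting helper for the admin module.
    fn mount_admin(rocket: Rocket<Build>) -> Rocket<Build> {
        rocket.mount("/admin", routes()).register("/admin", catchers())
    }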
@@ -72,33 +82,26 @@ fn admin_disabled() -> &'static str {
     "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
 }

-const COOKIE_NAME: &str = "BWRS_ADMIN";
+const COOKIE_NAME: &str = "VW_ADMIN";
 const ADMIN_PATH: &str = "/admin";
+const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";

 const BASE_TEMPLATE: &str = "admin/base";
-const VERSION: Option<&str> = option_env!("BWRS_VERSION");
+
+const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";

 fn admin_path() -> String {
     format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
 }

-struct Referer(Option<String>);
-
-impl<'a, 'r> FromRequest<'a, 'r> for Referer {
-    type Error = ();
-
-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
-        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
-    }
-}
-
 #[derive(Debug)]
 struct IpHeader(Option<String>);

-impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for IpHeader {
     type Error = ();

-    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         if req.headers().get_one(&CONFIG.ip_header()).is_some() {
             Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
         } else if req.headers().get_one("X-Client-IP").is_some() {
@@ -113,35 +116,37 @@ impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
     }
 }

-/// Used for `Location` response headers, which must specify an absolute URI
-/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
-fn admin_url(referer: Referer) -> String {
-    // If we get a referer use that to make it work when, DOMAIN is not set
-    if let Some(mut referer) = referer.0 {
-        if let Some(start_index) = referer.find(ADMIN_PATH) {
-            referer.truncate(start_index + ADMIN_PATH.len());
-            return referer;
-        }
-    }
-
-    if CONFIG.domain_set() {
-        // Don't use CONFIG.domain() directly, since the user may want to keep a
-        // trailing slash there, particularly when running under a subpath.
-        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
-    } else {
-        // Last case, when no referer or domain set, technically invalid but better than nothing
-        ADMIN_PATH.to_string()
-    }
+fn admin_url() -> String {
+    format!("{}{}", CONFIG.domain_origin(), admin_path())
 }

-#[get("/", rank = 2)]
-fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
+#[derive(Responder)]
+enum AdminResponse {
+    #[response(status = 200)]
+    Ok(ApiResult<Html<String>>),
+    #[response(status = 401)]
+    Unauthorized(ApiResult<Html<String>>),
+    #[response(status = 429)]
+    TooManyRequests(ApiResult<Html<String>>),
+}
+
+#[catch(401)]
+fn admin_login(request: &Request<'_>) -> ApiResult<Html<String>> {
+    if request.format() == Some(&MediaType::JSON) {
+        err_code!("Authorization failed.", Status::Unauthorized.code);
+    }
+    let redirect = request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
+    render_admin_login(None, Some(redirect))
+}
+
+fn render_admin_login(msg: Option<&str>, redirect: Option<String>) -> ApiResult<Html<String>> {
     // If there is an error, show it
-    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
+    let msg = msg.map(|msg| format!("Error: {msg}"));
     let json = json!({
         "page_content": "admin/login",
         "version": VERSION,
         "error": msg,
+        "redirect": redirect,
         "urlpath": CONFIG.domain_path()
     });
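Reading the two hunks above together: admin_login is now a Rocket 401 catcher rather than a GET route, JSON clients receive a plain 401 error, and the originally requested admin sub-path is threaded through render_admin_login as a redirect target so a successful login can return the user to it.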
@@ -153,21 +158,25 @@ fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
 #[derive(FromForm)]
 struct LoginForm {
     token: String,
+    redirect: Option<String>,
 }

 #[post("/", data = "<data>")]
-fn post_admin_login(
-    data: Form<LoginForm>,
-    mut cookies: Cookies,
-    ip: ClientIp,
-    referer: Referer,
-) -> Result<Redirect, Flash<Redirect>> {
+fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
     let data = data.into_inner();
+    let redirect = data.redirect;
+
+    if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
+        return Err(AdminResponse::TooManyRequests(render_admin_login(
+            Some("Too many requests, try again later."),
+            redirect,
+        )));
+    }

     // If the token is invalid, redirect to login page
     if !_validate_token(&data.token) {
         error!("Invalid admin token. IP: {}", ip.ip);
-        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
+        Err(AdminResponse::Unauthorized(render_admin_login(Some("Invalid admin token, please try again."), redirect)))
     } else {
         // If the token received is valid, generate JWT and save it as a cookie
         let claims = generate_admin_claims();
@@ -175,13 +184,17 @@ fn post_admin_login(

         let cookie = Cookie::build(COOKIE_NAME, jwt)
             .path(admin_path())
-            .max_age(time::Duration::minutes(20))
+            .max_age(rocket::time::Duration::minutes(20))
             .same_site(SameSite::Strict)
             .http_only(true)
             .finish();

         cookies.add(cookie);
-        Ok(Redirect::to(admin_url(referer)))
+        if let Some(redirect) = redirect {
+            Ok(Redirect::to(format!("{}{}", admin_path(), redirect)))
+        } else {
+            Err(AdminResponse::Ok(render_admin_page()))
+        }
     }
 }
@@ -233,20 +246,24 @@ impl AdminTemplateData {
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/", rank = 1)]
|
||||
fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
|
||||
fn render_admin_page() -> ApiResult<Html<String>> {
|
||||
let text = AdminTemplateData::new().render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[get("/")]
|
||||
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
|
||||
render_admin_page()
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
#[allow(non_snake_case)]
|
||||
struct InviteData {
|
||||
email: String,
|
||||
}
|
||||
|
||||
fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
|
||||
if let Some(user) = User::find_by_uuid(uuid, conn) {
|
||||
async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
|
||||
if let Some(user) = User::find_by_uuid(uuid, conn).await {
|
||||
Ok(user)
|
||||
} else {
|
||||
err_code!("User doesn't exist", Status::NotFound.code);
|
||||
@@ -254,128 +271,147 @@ fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
|
||||
}
|
||||
|
||||
#[post("/invite", data = "<data>")]
|
||||
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
|
||||
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||
let data: InviteData = data.into_inner();
|
||||
let email = data.email.clone();
|
||||
if User::find_by_mail(&data.email, &conn).is_some() {
|
||||
if User::find_by_mail(&data.email, &mut conn).await.is_some() {
|
||||
err_code!("User already exists", Status::Conflict.code)
|
||||
}
|
||||
|
||||
let mut user = User::new(email);
|
||||
|
||||
// TODO: After try_blocks is stabilized, this can be made more readable
|
||||
// See: https://github.com/rust-lang/rust/issues/31436
|
||||
(|| {
|
||||
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?;
|
||||
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
|
||||
} else {
|
||||
let invitation = Invitation::new(data.email);
|
||||
invitation.save(&conn)?;
|
||||
let invitation = Invitation::new(&user.email);
|
||||
invitation.save(conn).await
|
||||
}
|
||||
}
|
||||
|
||||
user.save(&conn)
|
||||
})()
|
||||
.map_err(|e| e.with_code(Status::InternalServerError.code))?;
|
||||
_generate_invite(&user, &mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
|
||||
user.save(&mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
|
||||
|
||||
Ok(Json(user.to_json(&conn)))
|
||||
Ok(Json(user.to_json(&mut conn).await))
|
||||
}
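// Sketch (not part of this diff): once try_blocks stabilizes
// (https://github.com/rust-lang/rust/issues/31436), the _generate_invite helper
// above could fold back into the caller as a single fallible block.
// Nightly-only and illustrative:
//
//     let res: EmptyResult = try {
//         if CONFIG.mail_enabled() {
//             mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await?;
//         } else {
//             Invitation::new(&user.email).save(&mut conn).await?;
//         }
//         user.save(&mut conn).await?;
//     };
//     res.map_err(|e| e.with_code(Status::InternalServerError.code))?;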

#[post("/test/smtp", data = "<data>")]
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
    let data: InviteData = data.into_inner();

    if CONFIG.mail_enabled() {
        mail::send_test(&data.email)
        mail::send_test(&data.email).await
    } else {
        err!("Mail is not enabled")
    }
}

#[get("/logout")]
fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
    cookies.remove(Cookie::named(COOKIE_NAME));
    Redirect::to(admin_url(referer))
fn logout(cookies: &CookieJar<'_>) -> Redirect {
    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
    Redirect::to(admin_path())
}

#[get("/users")]
fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
    let mut users_json = Vec::new();
    for u in User::get_all(&mut conn).await {
        let mut usr = u.to_json(&mut conn).await;
        usr["UserEnabled"] = json!(u.enabled);
        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        users_json.push(usr);
    }

    Json(Value::Array(users_json))
}

#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
    let users_json: Vec<Value> = users
        .iter()
        .map(|u| {
            let mut usr = u.to_json(&conn);
            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
            usr["user_enabled"] = json!(u.enabled);
            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
            usr["last_active"] = match u.last_active(&conn) {
                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
                None => json!("Never"),
            };
            usr
        })
        .collect();
async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let mut users_json = Vec::new();
    for u in User::get_all(&mut conn).await {
        let mut usr = u.to_json(&mut conn).await;
        usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
        usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
        usr["user_enabled"] = json!(u.enabled);
        usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        usr["last_active"] = match u.last_active(&mut conn).await {
            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
            None => json!("Never"),
        };
        users_json.push(usr);
    }

    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
    Ok(Html(text))
}

#[get("/users/<uuid>")]
fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
    let user = get_user_or_404(&uuid, &conn)?;

    Ok(Json(user.to_json(&conn)))
async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
    let u = get_user_or_404(&uuid, &mut conn).await?;
    let mut usr = u.to_json(&mut conn).await;
    usr["UserEnabled"] = json!(u.enabled);
    usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
    Ok(Json(usr))
}

#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let user = get_user_or_404(&uuid, &conn)?;
    user.delete(&conn)
async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: ClientIp) -> EmptyResult {
    let user = get_user_or_404(&uuid, &mut conn).await?;

    // Get the user_org records before deleting the actual user
    let user_orgs = UserOrganization::find_any_state_by_user(&uuid, &mut conn).await;
    let res = user.delete(&mut conn).await;

    for user_org in user_orgs {
        log_event(
            EventType::OrganizationUserRemoved as i32,
            &user_org.uuid,
            user_org.org_uuid,
            String::from(ACTING_ADMIN_USER),
            14, // Use UnknownBrowser type
            &ip.ip,
            &mut conn,
        )
        .await;
    }

    res
}

#[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &conn)?;
    Device::delete_all_by_user(&user.uuid, &conn)?;
async fn deauth_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &mut conn).await?;
    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();

    user.save(&conn)
    user.save(&mut conn).await
}

#[post("/users/<uuid>/disable")]
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &conn)?;
    Device::delete_all_by_user(&user.uuid, &conn)?;
async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &mut conn).await?;
    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();
    user.enabled = false;

    user.save(&conn)
    user.save(&mut conn).await
}

#[post("/users/<uuid>/enable")]
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &conn)?;
async fn enable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &mut conn).await?;
    user.enabled = true;

    user.save(&conn)
    user.save(&mut conn).await
}

#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &conn)?;
    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
async fn remove_2fa(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &mut conn).await?;
    TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.totp_recover = None;
    user.save(&conn)
    user.save(&mut conn).await
}

#[derive(Deserialize, Debug)]
@@ -386,13 +422,19 @@ struct UserOrgTypeData {
}

#[post("/users/org_type", data = "<data>")]
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
async fn update_user_org_type(
    data: Json<UserOrgTypeData>,
    _token: AdminToken,
    mut conn: DbConn,
    ip: ClientIp,
) -> EmptyResult {
    let data: UserOrgTypeData = data.into_inner();

    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
        Some(user) => user,
        None => err!("The specified user isn't a member of the organization"),
    };
    let mut user_to_edit =
        match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
            Some(user) => user,
            None => err!("The specified user isn't a member of the organization"),
        };

    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
        Some(new_type) => new_type as i32,
@@ -400,46 +442,66 @@ fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: D
    };

    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
        // Removing owner permission; check that there is at least one other owner
        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();

        if num_owners <= 1 {
        // Removing owner permission; check that there is at least one other confirmed owner
        if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
            err!("Can't change the type of the last owner")
        }
    }

    user_to_edit.atype = new_type as i32;
    user_to_edit.save(&conn)
    // This check is also done at api::organizations::{accept_invite(), _confirm_invite(), _activate_user(), edit_user()} and update_user_org_type();
    // each of those functions returns a different error message.
    if new_type < UserOrgType::Admin {
        match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
            Ok(_) => {}
            Err(OrgPolicyErr::TwoFactorMissing) => {
                err!("You cannot modify this user to this type because it has no two-step login method activated");
            }
            Err(OrgPolicyErr::SingleOrgEnforced) => {
                err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
            }
        }
    }

    log_event(
        EventType::OrganizationUserUpdated as i32,
        &user_to_edit.uuid,
        data.org_uuid,
        String::from(ACTING_ADMIN_USER),
        14, // Use UnknownBrowser type
        &ip.ip,
        &mut conn,
    )
    .await;

    user_to_edit.atype = new_type;
    user_to_edit.save(&mut conn).await
}

#[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&conn)
async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&mut conn).await
}

#[get("/organizations/overview")]
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let organizations = Organization::get_all(&conn);
    let organizations_json: Vec<Value> = organizations
        .iter()
        .map(|o| {
            let mut org = o.to_json();
            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
            org
        })
        .collect();
async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let mut organizations_json = Vec::new();
    for o in Organization::get_all(&mut conn).await {
        let mut org = o.to_json();
        org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
        org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
        organizations_json.push(org);
    }

    let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
    Ok(Html(text))
}

#[post("/organizations/<uuid>/delete")]
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
    org.delete(&conn)
async fn delete_organization(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let org = Organization::find_by_uuid(&uuid, &mut conn).await.map_res("Organization doesn't exist")?;
    org.delete(&mut conn).await
}

#[derive(Deserialize)]
@@ -457,62 +519,37 @@ struct GitCommit {
    sha: String,
}

fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    let github_api = get_reqwest_client();

    Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
    Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
}

fn has_http_access() -> bool {
async fn has_http_access() -> bool {
    let http_access = get_reqwest_client();

    match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
    match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
        Ok(r) => r.status().is_success(),
        _ => false,
    }
}

#[get("/diagnostics")]
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
    use crate::util::read_file_string;
    use chrono::prelude::*;
    use std::net::ToSocketAddrs;

    // Get current running versions
    let web_vault_version: WebVaultVersion =
        match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
            Ok(s) => serde_json::from_str(&s)?,
            _ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
                Ok(s) => serde_json::from_str(&s)?,
                _ => WebVaultVersion {
                    version: String::from("Version file missing"),
                },
            },
        };

    // Execute some environment checks
    let running_within_docker = is_running_in_docker();
    let has_http_access = has_http_access();
    let uses_proxy = env::var_os("HTTP_PROXY").is_some()
        || env::var_os("http_proxy").is_some()
        || env::var_os("HTTPS_PROXY").is_some()
        || env::var_os("https_proxy").is_some();

    // Check if we are able to resolve DNS entries
    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
        Ok(Some(a)) => a.ip().to_string(),
        _ => "Could not resolve domain name.".to_string(),
    };

use cached::proc_macro::cached;
/// Cache this function to prevent API call rate limits. GitHub only allows 60 requests per hour, and we use 3 here already.
/// It will cache this function for 300 seconds (5 minutes), which should prevent exhaustion of the rate limit.
#[cached(time = 300, sync_writes = true)]
async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
    // If the HTTP check failed, do not even attempt to check for new versions since we were not able to connect to github.com anyway.
    // TODO: Maybe we need to cache this using a LazyStatic or something. GitHub only allows 60 requests per hour, and we use 3 here already.
    let (latest_release, latest_commit, latest_web_build) = if has_http_access {
    if has_http_access {
        (
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
                .await
            {
                Ok(r) => r.tag_name,
                _ => "-".to_string(),
            },
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
            {
                Ok(mut c) => {
                    c.sha.truncate(8);
                    c.sha
@@ -526,7 +563,9 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
            } else {
                match get_github_api::<GitRelease>(
                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
                ) {
                )
                .await
                {
                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
                    _ => "-".to_string(),
                }
@@ -534,8 +573,43 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
        )
    } else {
        ("-".to_string(), "-".to_string(), "-".to_string())
    }
}
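// Note (not part of this diff): #[cached] comes from the `cached` crate. It wraps
// the function in a memoizing shim keyed on the argument tuple, so within the
// 300-second TTL repeated admin page loads reuse one GitHub lookup instead of
// spending the 60-requests-per-hour unauthenticated API budget. Minimal sketch
// with an illustrative function name:
//
//     use cached::proc_macro::cached;
//
//     #[cached(time = 300, sync_writes = true)]
//     async fn latest_tag(repo: String) -> String {
//         // The body runs at most once per TTL per distinct `repo`; with
//         // sync_writes, concurrent callers wait for the first computation
//         // instead of racing to recompute it.
//         format!("fetched tag for {repo}")
//     }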

#[get("/diagnostics")]
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult<Html<String>> {
    use chrono::prelude::*;
    use std::net::ToSocketAddrs;

    // Get current running versions
    let web_vault_version: WebVaultVersion =
        match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
            Ok(s) => serde_json::from_str(&s)?,
            _ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
                Ok(s) => serde_json::from_str(&s)?,
                _ => WebVaultVersion {
                    version: String::from("Version file missing"),
                },
            },
        };

    // Execute some environment checks
    let running_within_docker = is_running_in_docker();
    let has_http_access = has_http_access().await;
    let uses_proxy = env::var_os("HTTP_PROXY").is_some()
        || env::var_os("http_proxy").is_some()
        || env::var_os("HTTPS_PROXY").is_some()
        || env::var_os("https_proxy").is_some();

    // Check if we are able to resolve DNS entries
    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
        Ok(Some(a)) => a.ip().to_string(),
        _ => "Could not resolve domain name.".to_string(),
    };

    let (latest_release, latest_commit, latest_web_build) =
        get_release_info(has_http_access, running_within_docker).await;

    let ip_header_name = match &ip_header.0 {
        Some(h) => h,
        _ => "",
@@ -549,6 +623,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
        "web_vault_version": web_vault_version.version,
        "latest_web_build": latest_web_build,
        "running_within_docker": running_within_docker,
        "docker_base_image": docker_base_image(),
        "has_http_access": has_http_access,
        "ip_header_exists": &ip_header.0.is_some(),
        "ip_header_match": ip_header_name == CONFIG.ip_header(),
@@ -556,8 +631,8 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
        "ip_header_config": &CONFIG.ip_header(),
        "uses_proxy": uses_proxy,
        "db_type": *DB_TYPE,
        "db_version": get_sql_server_version(&conn),
        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
        "db_version": get_sql_server_version(&mut conn).await,
        "admin_url": format!("{}/diagnostics", admin_url()),
        "overrides": &CONFIG.get_overrides().join(", "),
        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
@@ -585,9 +660,9 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
}

#[post("/config/backup_db")]
fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
    if *CAN_BACKUP {
        backup_database(&conn)
        backup_database(&mut conn).await
    } else {
        err!("Can't back up current DB (Only SQLite supports this feature)");
    }
@@ -595,33 +670,34 @@ fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {

pub struct AdminToken {}

impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminToken {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        if CONFIG.disable_admin_token() {
            Outcome::Success(AdminToken {})
            Outcome::Success(Self {})
        } else {
            let mut cookies = request.cookies();
            let cookies = request.cookies();

            let access_token = match cookies.get(COOKIE_NAME) {
                Some(cookie) => cookie.value(),
                None => return Outcome::Forward(()), // If there is no cookie, redirect to login
                None => return Outcome::Failure((Status::Unauthorized, "Unauthorized")),
            };

            let ip = match request.guard::<ClientIp>() {
            let ip = match ClientIp::from_request(request).await {
                Outcome::Success(ip) => ip.ip,
                _ => err_handler!("Error getting Client IP"),
            };

            if decode_admin(access_token).is_err() {
                // Remove admin cookie
                cookies.remove(Cookie::named(COOKIE_NAME));
                cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
                error!("Invalid or expired admin JWT. IP: {}.", ip);
                return Outcome::Forward(());
                return Outcome::Failure((Status::Unauthorized, "Session expired"));
            }

            Outcome::Success(AdminToken {})
            Outcome::Success(Self {})
        }
    }
}
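// Usage sketch (illustrative route, not part of this diff): any handler that
// takes the guard as a parameter is only reached when from_request() above
// succeeds, so the 401 for a missing or expired cookie happens before the
// handler body ever runs.
//
//     #[get("/example")]
//     fn example_admin_page(_token: AdminToken) -> &'static str {
//         "only reachable with a valid admin JWT cookie"
//     }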
@@ -1,10 +1,12 @@
use chrono::Utc;
use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
    api::{
        core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
    },
    auth::{decode_delete, decode_invite, decode_verify_email, ClientIp, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
@@ -34,12 +36,15 @@ pub fn routes() -> Vec<rocket::Route> {
        password_hint,
        prelogin,
        verify_password,
        api_key,
        rotate_api_key,
        get_known_device,
    ]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct RegisterData {
pub struct RegisterData {
    Email: String,
    Kdf: Option<i32>,
    KdfIterations: Option<i32>,
@@ -49,6 +54,7 @@ struct RegisterData {
    MasterPasswordHint: Option<String>,
    Name: Option<String>,
    Token: Option<String>,
    #[allow(dead_code)]
    OrganizationUserId: Option<String>,
}

@@ -59,36 +65,74 @@ struct KeysData {
    PublicKey: String,
}

#[post("/accounts/register", data = "<data>")]
fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    let data: RegisterData = data.into_inner().data;
/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
    match password_hint {
        None => None,
        Some(h) => match h.trim() {
            "" => None,
            ht => Some(ht.to_string()),
        },
    }
}

    let mut user = match User::find_by_mail(&data.Email, &conn) {
        Some(user) => {
fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
    if password_hint.is_some() && !CONFIG.password_hints_allowed() {
        err!("Password hints have been disabled by the administrator. Remove the hint and try again.");
    }
    Ok(())
}
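// Behavior sketch (illustrative, not part of this diff): clean_password_hint
// normalizes client input before the setting check, so a hint of pure
// whitespace counts as "no hint".
//
//     assert_eq!(clean_password_hint(&None), None);
//     assert_eq!(clean_password_hint(&Some("   ".into())), None);
//     assert_eq!(clean_password_hint(&Some(" hunter2 ".into())), Some("hunter2".to_string()));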

#[post("/accounts/register", data = "<data>")]
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
    _register(data, conn).await
}

pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> JsonResult {
    let data: RegisterData = data.into_inner().data;
    let email = data.Email.to_lowercase();

    // Check if the length of the username exceeds 50 characters (same as upstream Bitwarden).
    // This also prevents issues with very long usernames causing too large JWTs. See #2419
    if let Some(ref name) = data.Name {
        if name.len() > 50 {
            err!("The field Name must be a string with a maximum length of 50.");
        }
    }

    // Check against the password hint setting here so if it fails, the user
    // can retry without losing their invitation below.
    let password_hint = clean_password_hint(&data.MasterPasswordHint);
    enforce_password_hint_setting(&password_hint)?;

    let mut verified_by_invite = false;

    let mut user = match User::find_by_mail(&email, &mut conn).await {
        Some(mut user) => {
            if !user.password_hash.is_empty() {
                if CONFIG.is_signup_allowed(&data.Email) {
                    err!("User already exists")
                } else {
                    err!("Registration not allowed or user already exists")
                }
                err!("Registration not allowed or user already exists")
            }

            if let Some(token) = data.Token {
                let claims = decode_invite(&token)?;
                if claims.email == data.Email {
                if claims.email == email {
                    // Verify the email address when signing up via a valid invite token
                    verified_by_invite = true;
                    user.verified_at = Some(Utc::now().naive_utc());
                    user
                } else {
                    err!("Registration email does not match invite email")
                }
            } else if Invitation::take(&data.Email, &conn) {
                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
            } else if Invitation::take(&email, &mut conn).await {
                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
                    user_org.status = UserOrgStatus::Accepted as i32;
                    user_org.save(&conn)?;
                    user_org.save(&mut conn).await?;
                }

                user
            } else if CONFIG.is_signup_allowed(&data.Email) {
                err!("Account with this email already exists")
            } else if CONFIG.is_signup_allowed(&email)
                || EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
            {
                user
            } else {
                err!("Registration not allowed or user already exists")
            }
@@ -97,8 +141,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
            // Order is important here; the invitation check must come first
            // because the vaultwarden admin can invite anyone, regardless
            // of other signup restrictions.
            if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
                User::new(data.Email.clone())
            if Invitation::take(&email, &mut conn).await || CONFIG.is_signup_allowed(&email) {
                User::new(email.clone())
            } else {
                err!("Registration not allowed or user already exists")
            }
@@ -106,7 +150,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    };

    // Make sure we don't leave a lingering invitation.
    Invitation::take(&data.Email, &conn);
    Invitation::take(&email, &mut conn).await;

    if let Some(client_kdf_iter) = data.KdfIterations {
        user.client_kdf_iter = client_kdf_iter;
@@ -118,73 +162,75 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {

    user.set_password(&data.MasterPasswordHash, None);
    user.akey = data.Key;
    user.password_hint = password_hint;

    // Add extra fields if present
    if let Some(name) = data.Name {
        user.name = name;
    }

    if let Some(hint) = data.MasterPasswordHint {
        user.password_hint = Some(hint);
    }

    if let Some(keys) = data.Keys {
        user.private_key = Some(keys.EncryptedPrivateKey);
        user.public_key = Some(keys.PublicKey);
    }

    if CONFIG.mail_enabled() {
        if CONFIG.signups_verify() {
            if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid) {
        if CONFIG.signups_verify() && !verified_by_invite {
            if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
                error!("Error sending welcome email: {:#?}", e);
            }

            user.last_verifying_at = Some(user.created_at);
        } else if let Err(e) = mail::send_welcome(&user.email) {
        } else if let Err(e) = mail::send_welcome(&user.email).await {
            error!("Error sending welcome email: {:#?}", e);
        }
    }

    user.save(&conn)
    user.save(&mut conn).await?;
    Ok(Json(json!({
        "Object": "register",
        "CaptchaBypassToken": "",
    })))
}

#[get("/accounts/profile")]
fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
    Json(headers.user.to_json(&conn))
async fn profile(headers: Headers, mut conn: DbConn) -> Json<Value> {
    Json(headers.user.to_json(&mut conn).await)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct ProfileData {
    #[serde(rename = "Culture")]
    _Culture: String, // Ignored, always use en-US
    MasterPasswordHint: Option<String>,
    // Culture: String, // Ignored, always use en-US
    // MasterPasswordHint: Option<String>, // Ignored, has been moved to ChangePassData
    Name: String,
}

#[put("/accounts/profile", data = "<data>")]
fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
    post_profile(data, headers, conn)
async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
    post_profile(data, headers, conn).await
}

#[post("/accounts/profile", data = "<data>")]
fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: ProfileData = data.into_inner().data;

    let mut user = headers.user;
    // Check if the length of the username exceeds 50 characters (same as upstream Bitwarden).
    // This also prevents issues with very long usernames causing too large JWTs. See #2419
    if data.Name.len() > 50 {
        err!("The field Name must be a string with a maximum length of 50.");
    }

    let mut user = headers.user;
    user.name = data.Name;
    user.password_hint = match data.MasterPasswordHint {
        Some(ref h) if h.is_empty() => None,
        _ => data.MasterPasswordHint,
    };
    user.save(&conn)?;
    Ok(Json(user.to_json(&conn)))

    user.save(&mut conn).await?;
    Ok(Json(user.to_json(&mut conn).await))
}

#[get("/users/<uuid>/public-key")]
fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
    let user = match User::find_by_uuid(&uuid, &conn) {
async fn get_public_keys(uuid: String, _headers: Headers, mut conn: DbConn) -> JsonResult {
    let user = match User::find_by_uuid(&uuid, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -197,7 +243,7 @@ fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult
}

#[post("/accounts/keys", data = "<data>")]
fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: KeysData = data.into_inner().data;

    let mut user = headers.user;
@@ -205,7 +251,7 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
    user.private_key = Some(data.EncryptedPrivateKey);
    user.public_key = Some(data.PublicKey);

    user.save(&conn)?;
    user.save(&mut conn).await?;

    Ok(Json(json!({
        "PrivateKey": user.private_key,
@@ -219,11 +265,17 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
struct ChangePassData {
    MasterPasswordHash: String,
    NewMasterPasswordHash: String,
    MasterPasswordHint: Option<String>,
    Key: String,
}

#[post("/accounts/password", data = "<data>")]
fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_password(
    data: JsonUpcase<ChangePassData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> EmptyResult {
    let data: ChangePassData = data.into_inner().data;
    let mut user = headers.user;

@@ -231,9 +283,17 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
        err!("Invalid password")
    }

    user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
    user.password_hint = clean_password_hint(&data.MasterPasswordHint);
    enforce_password_hint_setting(&user.password_hint)?;

    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    user.set_password(
        &data.NewMasterPasswordHash,
        Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
    );
    user.akey = data.Key;
    user.save(&conn)
    user.save(&mut conn).await
}

#[derive(Deserialize)]
@@ -248,7 +308,7 @@ struct ChangeKdfData {
}

#[post("/accounts/kdf", data = "<data>")]
fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: ChangeKdfData = data.into_inner().data;
    let mut user = headers.user;

@@ -260,7 +320,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
    user.client_kdf_type = data.Kdf;
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;
    user.save(&conn)
    user.save(&mut conn).await
}

#[derive(Deserialize)]
@@ -283,7 +343,13 @@ struct KeyData {
}

#[post("/accounts/key", data = "<data>")]
fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
async fn post_rotatekey(
    data: JsonUpcase<KeyData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: KeyData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
@@ -294,7 +360,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:

    // Update folder data
    for folder_data in data.Folders {
        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn) {
        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &mut conn).await {
            Some(folder) => folder,
            None => err!("Folder doesn't exist"),
        };
@@ -304,14 +370,14 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
        }

        saved_folder.name = folder_data.Name;
        saved_folder.save(&conn)?
        saved_folder.save(&mut conn).await?
    }

    // Update cipher data
    use super::ciphers::update_cipher_from_data;

    for cipher_data in data.Ciphers {
        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn) {
        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
            Some(cipher) => cipher,
            None => err!("Cipher doesn't exist"),
        };
@@ -320,7 +386,10 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
            err!("The cipher is not owned by the user")
        }

        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherUpdate)?
        // Prevent triggering cipher updates via WebSockets by setting UpdateType::None
        // The user sessions are invalidated because all the ciphers were re-encrypted, and thus triggering an update could cause issues.
        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
            .await?
    }

    // Update user data
@@ -329,13 +398,12 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
    user.akey = data.Key;
    user.private_key = Some(data.PrivateKey);
    user.reset_security_stamp();
    user.reset_stamp_exception();

    user.save(&conn)
    user.save(&mut conn).await
}

#[post("/accounts/security-stamp", data = "<data>")]
fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: PasswordData = data.into_inner().data;
    let mut user = headers.user;

@@ -343,9 +411,9 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
        err!("Invalid password")
    }

    Device::delete_all_by_user(&user.uuid, &conn)?;
    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();
    user.save(&conn)
    user.save(&mut conn).await
}

#[derive(Deserialize)]
@@ -356,7 +424,7 @@ struct EmailTokenData {
}

#[post("/accounts/email-token", data = "<data>")]
fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: EmailTokenData = data.into_inner().data;
    let mut user = headers.user;

@@ -364,7 +432,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
        err!("Invalid password")
    }

    if User::find_by_mail(&data.NewEmail, &conn).is_some() {
    if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
        err!("Email already in use");
    }

@@ -372,17 +440,17 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
        err!("Email domain not allowed");
    }

    let token = crypto::generate_token(6)?;
    let token = crypto::generate_email_token(6);

    if CONFIG.mail_enabled() {
        if let Err(e) = mail::send_change_email(&data.NewEmail, &token) {
        if let Err(e) = mail::send_change_email(&data.NewEmail, &token).await {
            error!("Error sending change-email email: {:#?}", e);
        }
    }

    user.email_new = Some(data.NewEmail);
    user.email_new_token = Some(token);
    user.save(&conn)
    user.save(&mut conn).await
}

#[derive(Deserialize)]
@@ -397,7 +465,7 @@ struct ChangeEmailData {
}

#[post("/accounts/email", data = "<data>")]
fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: ChangeEmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -405,7 +473,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
        err!("Invalid password")
    }

    if User::find_by_mail(&data.NewEmail, &conn).is_some() {
    if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
        err!("Email already in use");
    }

@@ -440,18 +508,18 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;

    user.save(&conn)
    user.save(&mut conn).await
}

#[post("/accounts/verify-email")]
fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
async fn post_verify_email(headers: Headers) -> EmptyResult {
    let user = headers.user;

    if !CONFIG.mail_enabled() {
        err!("Cannot verify email address");
    }

    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
        error!("Error sending verify_email email: {:#?}", e);
    }

@@ -466,10 +534,10 @@ struct VerifyEmailTokenData {
}

#[post("/accounts/verify-email-token", data = "<data>")]
fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult {
    let data: VerifyEmailTokenData = data.into_inner().data;

    let mut user = match User::find_by_uuid(&data.UserId, &conn) {
    let mut user = match User::find_by_uuid(&data.UserId, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -484,7 +552,7 @@ fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn)
    user.verified_at = Some(Utc::now().naive_utc());
    user.last_verifying_at = None;
    user.login_verify_count = 0;
    if let Err(e) = user.save(&conn) {
    if let Err(e) = user.save(&mut conn).await {
        error!("Error saving email verification: {:#?}", e);
    }

@@ -498,14 +566,12 @@ struct DeleteRecoverData {
}

#[post("/accounts/delete-recover", data = "<data>")]
fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, mut conn: DbConn) -> EmptyResult {
    let data: DeleteRecoverData = data.into_inner().data;

    let user = User::find_by_mail(&data.Email, &conn);

    if CONFIG.mail_enabled() {
        if let Some(user) = user {
            if let Err(e) = mail::send_delete_account(&user.email, &user.uuid) {
        if let Some(user) = User::find_by_mail(&data.Email, &mut conn).await {
            if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await {
                error!("Error sending delete account email: {:#?}", e);
            }
        }
@@ -527,10 +593,10 @@ struct DeleteRecoverTokenData {
}

#[post("/accounts/delete-recover-token", data = "<data>")]
fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult {
    let data: DeleteRecoverTokenData = data.into_inner().data;

    let user = match User::find_by_uuid(&data.UserId, &conn) {
    let user = match User::find_by_uuid(&data.UserId, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -542,16 +608,16 @@ fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbC
    if claims.sub != user.uuid {
        err!("Invalid claim");
    }
    user.delete(&conn)
    user.delete(&mut conn).await
}

#[post("/accounts/delete", data = "<data>")]
fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
    delete_account(data, headers, conn)
async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
    delete_account(data, headers, conn).await
}

#[delete("/accounts", data = "<data>")]
fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: PasswordData = data.into_inner().data;
    let user = headers.user;

@@ -559,7 +625,7 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
        err!("Invalid password")
    }

    user.delete(&conn)
    user.delete(&mut conn).await
}

#[get("/accounts/revision-date")]
@@ -575,38 +641,62 @@ struct PasswordHintData {
}

#[post("/accounts/password-hint", data = "<data>")]
fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
    let data: PasswordHintData = data.into_inner().data;

    let hint = match User::find_by_mail(&data.Email, &conn) {
        Some(user) => user.password_hint,
        None => return Ok(()),
    };

    if CONFIG.mail_enabled() {
        mail::send_password_hint(&data.Email, hint)?;
    } else if CONFIG.show_password_hint() {
        if let Some(hint) = hint {
            err!(format!("Your password hint is: {}", &hint));
        } else {
            err!("Sorry, you have no password hint...");
        }
async fn password_hint(data: JsonUpcase<PasswordHintData>, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
        err!("This server is not configured to provide password hints.");
    }

    Ok(())
    const NO_HINT: &str = "Sorry, you have no password hint...";

    let data: PasswordHintData = data.into_inner().data;
    let email = &data.Email;

    match User::find_by_mail(email, &mut conn).await {
        None => {
            // To prevent user enumeration, act as if the user exists.
            if CONFIG.mail_enabled() {
                // There is still a timing side channel here in that the code
                // paths that send mail take noticeably longer than ones that
                // don't. Add a randomized sleep to mitigate this somewhat.
                use rand::{rngs::SmallRng, Rng, SeedableRng};
                let mut rng = SmallRng::from_entropy();
                let delta: i32 = 100;
                let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
                tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
                Ok(())
            } else {
                err!(NO_HINT);
            }
        }
        Some(user) => {
            let hint: Option<String> = user.password_hint;
            if CONFIG.mail_enabled() {
                mail::send_password_hint(email, hint).await?;
                Ok(())
            } else if let Some(hint) = hint {
                err!(format!("Your password hint is: {}", hint));
            } else {
                err!(NO_HINT);
            }
        }
    }
}
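// Note (not part of this diff): with delta = 100 the randomized delay above is
// uniform over 1_000 ± 100 ms, i.e. 900..=1100 ms, roughly matching the latency
// of the mail-sending branch so an unknown address is not distinguishable from a
// known one by response time alone.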

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PreloginData {
pub struct PreloginData {
    Email: String,
}

#[post("/accounts/prelogin", data = "<data>")]
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
    _prelogin(data, conn).await
}

pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
    let data: PreloginData = data.into_inner().data;

    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => (user.client_kdf_type, user.client_kdf_iter),
        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
    };
@@ -616,15 +706,17 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
        "KdfIterations": kdf_iter
    }))
}

// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct VerifyPasswordData {
struct SecretVerificationRequest {
    MasterPasswordHash: String,
}

#[post("/accounts/verify-password", data = "<data>")]
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
    let data: VerifyPasswordData = data.into_inner().data;
fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers) -> EmptyResult {
    let data: SecretVerificationRequest = data.into_inner().data;
    let user = headers.user;

    if !user.check_valid_password(&data.MasterPasswordHash) {
@@ -633,3 +725,50 @@ fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn

    Ok(())
}

async fn _api_key(
    data: JsonUpcase<SecretVerificationRequest>,
    rotate: bool,
    headers: Headers,
    mut conn: DbConn,
) -> JsonResult {
    let data: SecretVerificationRequest = data.into_inner().data;
    let mut user = headers.user;

    if !user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password")
    }

    if rotate || user.api_key.is_none() {
        user.api_key = Some(crypto::generate_api_key());
        user.save(&mut conn).await.expect("Error saving API key");
    }

    Ok(Json(json!({
        "ApiKey": user.api_key,
        "Object": "apiKey",
    })))
}

#[post("/accounts/api-key", data = "<data>")]
async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
    _api_key(data, false, headers, conn).await
}

#[post("/accounts/rotate-api-key", data = "<data>")]
async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
    _api_key(data, true, headers, conn).await
}

#[get("/devices/knowndevice/<email>/<uuid>")]
async fn get_known_device(email: String, uuid: String, mut conn: DbConn) -> String {
    // This endpoint doesn't have an auth header
    if let Some(user) = User::find_by_mail(&email, &mut conn).await {
        match Device::find_by_uuid_and_user(&uuid, &user.uuid, &mut conn).await {
            Some(_) => String::from("true"),
            _ => String::from("false"),
        }
    } else {
        String::from("false")
    }
}
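// Usage sketch (illustrative; the mount prefix depends on how the routes are
// mounted by the server): an unauthenticated client asks whether a device UUID
// is already known for an email before prompting for new-device verification,
// and receives a bare string body:
//
//     GET /api/devices/knowndevice/<email>/<uuid>  ->  "true" | "false"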

File diff suppressed because it is too large

840	src/api/core/emergency_access.rs	Normal file
@@ -0,0 +1,840 @@
use chrono::{Duration, Utc};
use rocket::{serde::json::Json, Route};
use serde_json::Value;

use crate::{
    api::{
        core::{CipherSyncData, CipherSyncType},
        EmptyResult, JsonResult, JsonUpcase, NumberOrString,
    },
    auth::{decode_emergency_access_invite, Headers},
    db::{models::*, DbConn, DbPool},
    mail, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![
        get_contacts,
        get_grantees,
        get_emergency_access,
        put_emergency_access,
        delete_emergency_access,
        post_delete_emergency_access,
        send_invite,
        resend_invite,
        accept_invite,
        confirm_emergency_access,
        initiate_emergency_access,
        approve_emergency_access,
        reject_emergency_access,
        takeover_emergency_access,
        password_emergency_access,
        view_emergency_access,
        policies_emergency_access,
    ]
}
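// Mounting sketch (illustrative; the actual mount point lives elsewhere in the
// codebase): Rocket attaches these handlers under the API prefix, e.g.
//
//     rocket::build().mount("/api", routes())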

// region get

#[get("/emergency-access/trusted")]
async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
    let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
    for ea in emergency_access_list {
        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
    }

    Ok(Json(json!({
        "Data": emergency_access_list_json,
        "Object": "list",
        "ContinuationToken": null
    })))
}

#[get("/emergency-access/granted")]
async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
    let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
    for ea in emergency_access_list {
        emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
    }

    Ok(Json(json!({
        "Data": emergency_access_list_json,
        "Object": "list",
        "ContinuationToken": null
    })))
}

#[get("/emergency-access/<emer_id>")]
async fn get_emergency_access(emer_id: String, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
        None => err!("Emergency access not valid."),
    }
}

// endregion

// region put/post

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessUpdateData {
    Type: NumberOrString,
    WaitTimeDays: i32,
    KeyEncrypted: Option<String>,
}

#[put("/emergency-access/<emer_id>", data = "<data>")]
async fn put_emergency_access(
    emer_id: String,
    data: JsonUpcase<EmergencyAccessUpdateData>,
    conn: DbConn,
) -> JsonResult {
    post_emergency_access(emer_id, data, conn).await
}

#[post("/emergency-access/<emer_id>", data = "<data>")]
async fn post_emergency_access(
    emer_id: String,
    data: JsonUpcase<EmergencyAccessUpdateData>,
    mut conn: DbConn,
) -> JsonResult {
    check_emergency_access_allowed()?;

    let data: EmergencyAccessUpdateData = data.into_inner().data;

    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
        Some(emergency_access) => emergency_access,
        None => err!("Emergency access not valid."),
    };

    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
        Some(new_type) => new_type as i32,
        None => err!("Invalid emergency access type."),
    };

    emergency_access.atype = new_type;
    emergency_access.wait_time_days = data.WaitTimeDays;
    emergency_access.key_encrypted = data.KeyEncrypted;

    emergency_access.save(&mut conn).await?;
    Ok(Json(emergency_access.to_json()))
}

// endregion

// region delete

#[delete("/emergency-access/<emer_id>")]
async fn delete_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
    check_emergency_access_allowed()?;

    let grantor_user = headers.user;

    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
        Some(emer) => {
            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
                err!("Emergency access not valid.")
            }
            emer
        }
        None => err!("Emergency access not valid."),
    };
    emergency_access.delete(&mut conn).await?;
    Ok(())
}

#[post("/emergency-access/<emer_id>/delete")]
async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
    delete_emergency_access(emer_id, headers, conn).await
}

// endregion

// region invite

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessInviteData {
    Email: String,
    Type: NumberOrString,
    WaitTimeDays: i32,
}

#[post("/emergency-access/invite", data = "<data>")]
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    check_emergency_access_allowed()?;

    let data: EmergencyAccessInviteData = data.into_inner().data;
    let email = data.Email.to_lowercase();
    let wait_time_days = data.WaitTimeDays;

    let emergency_access_status = EmergencyAccessStatus::Invited as i32;

    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
        Some(new_type) => new_type as i32,
        None => err!("Invalid emergency access type."),
    };

    let grantor_user = headers.user;

    // Avoid setting yourself as an emergency contact
    if email == grantor_user.email {
        err!("You cannot set yourself as an emergency contact.")
    }

    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
        None => {
            if !CONFIG.invitations_allowed() {
                err!(format!("Grantee user does not exist: {}", &email))
            }

            if !CONFIG.is_email_domain_allowed(&email) {
                err!("Email domain not eligible for invitations")
            }
|
||||
if !CONFIG.mail_enabled() {
|
||||
let invitation = Invitation::new(&email);
|
||||
invitation.save(&mut conn).await?;
|
||||
}
|
||||
|
||||
let mut user = User::new(email.clone());
|
||||
user.save(&mut conn).await?;
|
||||
user
|
||||
}
|
||||
Some(user) => user,
|
||||
};
|
||||
|
||||
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
|
||||
&grantor_user.uuid,
|
||||
&grantee_user.uuid,
|
||||
&grantee_user.email,
|
||||
&mut conn,
|
||||
)
|
||||
.await
|
||||
.is_some()
|
||||
{
|
||||
err!(format!("Grantee user already invited: {}", &grantee_user.email))
|
||||
}
|
||||
|
||||
let mut new_emergency_access =
|
||||
EmergencyAccess::new(grantor_user.uuid, grantee_user.email, emergency_access_status, new_type, wait_time_days);
|
||||
new_emergency_access.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite(
|
||||
&new_emergency_access.email.expect("Grantee email does not exists"),
|
||||
&grantee_user.uuid,
|
||||
&new_emergency_access.uuid,
|
||||
&grantor_user.name,
|
||||
&grantor_user.email,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
// Automatically mark user as accepted if no email invites
|
||||
match User::find_by_mail(&email, &mut conn).await {
|
||||
Some(user) => match accept_invite_process(user.uuid, &mut new_emergency_access, &email, &mut conn).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => err!(e.to_string()),
|
||||
},
|
||||
None => err!("Grantee user not found."),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/reinvite")]
|
||||
async fn resend_invite(emer_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.grantor_uuid != headers.user.uuid {
|
||||
err!("Emergency access not valid.");
|
||||
}
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
|
||||
err!("The grantee user is already accepted or confirmed to the organization");
|
||||
}
|
||||
|
||||
let email = match emergency_access.email.clone() {
|
||||
Some(email) => email,
|
||||
None => err!("Email not valid."),
|
||||
};
|
||||
|
||||
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
};
|
||||
|
||||
let grantor_user = headers.user;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite(
|
||||
&email,
|
||||
&grantor_user.uuid,
|
||||
&emergency_access.uuid,
|
||||
&grantor_user.name,
|
||||
&grantor_user.email,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
|
||||
let invitation = Invitation::new(&email);
|
||||
invitation.save(&mut conn).await?;
|
||||
}
|
||||
|
||||
// Automatically mark user as accepted if no email invites
|
||||
match accept_invite_process(grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => err!(e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
struct AcceptData {
|
||||
Token: String,
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
|
||||
async fn accept_invite(
|
||||
emer_id: String,
|
||||
data: JsonUpcase<AcceptData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let data: AcceptData = data.into_inner().data;
|
||||
let token = &data.Token;
|
||||
let claims = decode_emergency_access_invite(token)?;
|
||||
|
||||
// This can happen if the user who received the invite used a different email to signup.
|
||||
// Since we do not know if this is intented, we error out here and do nothing with the invite.
|
||||
if claims.email != headers.user.email {
|
||||
err!("Claim email does not match current users email")
|
||||
}
|
||||
|
||||
let grantee_user = match User::find_by_mail(&claims.email, &mut conn).await {
|
||||
Some(user) => {
|
||||
Invitation::take(&claims.email, &mut conn).await;
|
||||
user
|
||||
}
|
||||
None => err!("Invited user not found"),
|
||||
};
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
// get grantor user to send Accepted email
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
if emer_id == claims.emer_id
|
||||
&& grantor_user.name == claims.grantor_name
|
||||
&& grantor_user.email == claims.grantor_email
|
||||
{
|
||||
match accept_invite_process(grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => err!(e.to_string()),
|
||||
}
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
err!("Emergency access invitation error.")
|
||||
}
|
||||
}
|
||||
|
||||
async fn accept_invite_process(
|
||||
grantee_uuid: String,
|
||||
emergency_access: &mut EmergencyAccess,
|
||||
grantee_email: &str,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
|
||||
err!("User email does not match invite.");
|
||||
}
|
||||
|
||||
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
|
||||
err!("Emergency contact already accepted.");
|
||||
}
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
|
||||
emergency_access.grantee_uuid = Some(grantee_uuid);
|
||||
emergency_access.email = None;
|
||||
emergency_access.save(conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
struct ConfirmData {
|
||||
Key: String,
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
|
||||
async fn confirm_emergency_access(
|
||||
emer_id: String,
|
||||
data: JsonUpcase<ConfirmData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let confirming_user = headers.user;
|
||||
let data: ConfirmData = data.into_inner().data;
|
||||
let key = data.Key;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|
||||
|| emergency_access.grantor_uuid != confirming_user.uuid
|
||||
{
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
};
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
|
||||
emergency_access.key_encrypted = Some(key);
|
||||
emergency_access.email = None;
|
||||
|
||||
emergency_access.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name).await?;
|
||||
}
|
||||
Ok(Json(emergency_access.to_json()))
|
||||
} else {
|
||||
err!("Grantee user not found.")
|
||||
}
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region access emergency access
|
||||
|
||||
#[post("/emergency-access/<emer_id>/initiate")]
|
||||
async fn initiate_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let initiating_user = headers.user;
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
|
||||
|| emergency_access.grantee_uuid != Some(initiating_user.uuid)
|
||||
{
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
emergency_access.status = EmergencyAccessStatus::RecoveryInitiated as i32;
|
||||
emergency_access.updated_at = now;
|
||||
emergency_access.recovery_initiated_at = Some(now);
|
||||
emergency_access.last_notification_at = Some(now);
|
||||
emergency_access.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_recovery_initiated(
|
||||
&grantor_user.email,
|
||||
&initiating_user.name,
|
||||
emergency_access.get_type_as_str(),
|
||||
&emergency_access.wait_time_days,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
Ok(Json(emergency_access.to_json()))
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/approve")]
|
||||
async fn approve_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||
|| emergency_access.grantor_uuid != headers.user.uuid
|
||||
{
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
};
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
|
||||
emergency_access.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name).await?;
|
||||
}
|
||||
Ok(Json(emergency_access.to_json()))
|
||||
} else {
|
||||
err!("Grantee user not found.")
|
||||
}
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/reject")]
|
||||
async fn reject_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
|
||||
|| emergency_access.grantor_uuid != headers.user.uuid
|
||||
{
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
};
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
|
||||
emergency_access.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
|
||||
}
|
||||
Ok(Json(emergency_access.to_json()))
|
||||
} else {
|
||||
err!("Grantee user not found.")
|
||||
}
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region action
|
||||
|
||||
#[post("/emergency-access/<emer_id>/view")]
|
||||
async fn view_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, headers.user.uuid, EmergencyAccessType::View) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await;
|
||||
let cipher_sync_data =
|
||||
CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, CipherSyncType::User, &mut conn).await;
|
||||
|
||||
let mut ciphers_json = Vec::new();
|
||||
for c in ciphers {
|
||||
ciphers_json
|
||||
.push(c.to_json(&headers.host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &mut conn).await);
|
||||
}
|
||||
|
||||
Ok(Json(json!({
|
||||
"Ciphers": ciphers_json,
|
||||
"KeyEncrypted": &emergency_access.key_encrypted,
|
||||
"Object": "emergencyAccessView",
|
||||
})))
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/takeover")]
|
||||
async fn takeover_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
Ok(Json(json!({
|
||||
"Kdf": grantor_user.client_kdf_type,
|
||||
"KdfIterations": grantor_user.client_kdf_iter,
|
||||
"KeyEncrypted": &emergency_access.key_encrypted,
|
||||
"Object": "emergencyAccessTakeover",
|
||||
})))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
struct EmergencyAccessPasswordData {
|
||||
NewMasterPasswordHash: String,
|
||||
Key: String,
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
|
||||
async fn password_emergency_access(
|
||||
emer_id: String,
|
||||
data: JsonUpcase<EmergencyAccessPasswordData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let data: EmergencyAccessPasswordData = data.into_inner().data;
|
||||
let new_master_password_hash = &data.NewMasterPasswordHash;
|
||||
let key = data.Key;
|
||||
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
// change grantor_user password
|
||||
grantor_user.set_password(new_master_password_hash, None);
|
||||
grantor_user.akey = key;
|
||||
grantor_user.save(&mut conn).await?;
|
||||
|
||||
// Disable TwoFactor providers since they will otherwise block logins
|
||||
TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?;
|
||||
|
||||
// Remove grantor from all organisations unless Owner
|
||||
for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
|
||||
if user_org.atype != UserOrgType::Owner as i32 {
|
||||
user_org.delete(&mut conn).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
#[get("/emergency-access/<emer_id>/policies")]
|
||||
async fn policies_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
|
||||
let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
|
||||
|
||||
Ok(Json(json!({
|
||||
"Data": policies_json,
|
||||
"Object": "list",
|
||||
"ContinuationToken": null
|
||||
})))
|
||||
}
|
||||
|
||||
fn is_valid_request(
|
||||
emergency_access: &EmergencyAccess,
|
||||
requesting_user_uuid: String,
|
||||
requested_access_type: EmergencyAccessType,
|
||||
) -> bool {
|
||||
emergency_access.grantee_uuid == Some(requesting_user_uuid)
|
||||
&& emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
|
||||
&& emergency_access.atype == requested_access_type as i32
|
||||
}
|
||||
|
||||
fn check_emergency_access_allowed() -> EmptyResult {
|
||||
if !CONFIG.emergency_access_allowed() {
|
||||
err!("Emergency access is not allowed.")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn emergency_request_timeout_job(pool: DbPool) {
|
||||
debug!("Start emergency_request_timeout_job");
|
||||
if !CONFIG.emergency_access_allowed() {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Ok(mut conn) = pool.get().await {
|
||||
let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await;
|
||||
|
||||
if emergency_access_list.is_empty() {
|
||||
debug!("No emergency request timeout to approve");
|
||||
}
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
for mut emer in emergency_access_list {
|
||||
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
|
||||
let recovery_allowed_at =
|
||||
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
|
||||
if recovery_allowed_at.le(&now) {
|
||||
// Only update the access status
|
||||
// Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
|
||||
emer.update_access_status_and_save(EmergencyAccessStatus::RecoveryApproved as i32, &now, &mut conn)
|
||||
.await
|
||||
.expect("Unable to update emergency access status");
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
// get grantor user to send Accepted email
|
||||
let grantor_user =
|
||||
User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found");
|
||||
|
||||
// get grantee user to send Accepted email
|
||||
let grantee_user =
|
||||
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn)
|
||||
.await
|
||||
.expect("Grantee user not found");
|
||||
|
||||
mail::send_emergency_access_recovery_timed_out(
|
||||
&grantor_user.email,
|
||||
&grantee_user.name,
|
||||
emer.get_type_as_str(),
|
||||
)
|
||||
.await
|
||||
.expect("Error on sending email");
|
||||
|
||||
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)
|
||||
.await
|
||||
.expect("Error on sending email");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!("Failed to get DB connection while searching emergency request timed out")
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn emergency_notification_reminder_job(pool: DbPool) {
|
||||
debug!("Start emergency_notification_reminder_job");
|
||||
if !CONFIG.emergency_access_allowed() {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Ok(mut conn) = pool.get().await {
|
||||
let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await;
|
||||
|
||||
if emergency_access_list.is_empty() {
|
||||
debug!("No emergency request reminder notification to send");
|
||||
}
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
for mut emer in emergency_access_list {
|
||||
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
|
||||
// Calculate the day before the recovery will become active
|
||||
let final_recovery_reminder_at =
|
||||
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
|
||||
// Calculate if a day has passed since the previous notification, else no notification has been sent before
|
||||
let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
|
||||
last_notification_at + Duration::days(1)
|
||||
} else {
|
||||
now
|
||||
};
|
||||
if final_recovery_reminder_at.le(&now) && next_recovery_reminder_at.le(&now) {
|
||||
// Only update the last notification date
|
||||
// Updating the whole record could cause issues when the emergency_request_timeout_job is also active
|
||||
emer.update_last_notification_date_and_save(&now, &mut conn)
|
||||
.await
|
||||
.expect("Unable to update emergency access notification date");
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
// get grantor user to send Accepted email
|
||||
let grantor_user =
|
||||
User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found");
|
||||
|
||||
// get grantee user to send Accepted email
|
||||
let grantee_user =
|
||||
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn)
|
||||
.await
|
||||
.expect("Grantee user not found");
|
||||
|
||||
mail::send_emergency_access_recovery_reminder(
|
||||
&grantor_user.email,
|
||||
&grantee_user.name,
|
||||
emer.get_type_as_str(),
|
||||
"1", // This notification is only triggered one day before the activation
|
||||
)
|
||||
.await
|
||||
.expect("Error on sending email");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!("Failed to get DB connection while searching emergency notification reminder")
|
||||
}
|
||||
}
|
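The two scheduled jobs above reduce to a small amount of chrono date arithmetic: auto-approval fires once the full wait time has elapsed, and the reminder fires on the day before activation, at most once per day. The standalone sketch below isolates that logic for readability; it assumes chrono 0.4, and the helper names (recovery_allowed_at, reminder_due) are illustrative, not part of the file above.

// Standalone sketch of the date arithmetic used by the two jobs above.
// Assumes chrono 0.4; the parameters mirror the EmergencyAccess fields,
// but these helpers themselves are illustrative only.
use chrono::{Duration, NaiveDateTime, Utc};

// Recovery may be auto-approved once the full wait time has elapsed.
fn recovery_allowed_at(recovery_initiated_at: NaiveDateTime, wait_time_days: i32) -> NaiveDateTime {
    recovery_initiated_at + Duration::days(i64::from(wait_time_days))
}

// A reminder is due on the day before activation, at most once per day.
fn reminder_due(
    recovery_initiated_at: NaiveDateTime,
    wait_time_days: i32,
    last_notification_at: Option<NaiveDateTime>,
    now: NaiveDateTime,
) -> bool {
    let final_reminder_at = recovery_initiated_at + Duration::days(i64::from(wait_time_days - 1));
    // If a notification was already sent, wait a full day before the next one.
    let next_reminder_at = last_notification_at.map(|t| t + Duration::days(1)).unwrap_or(now);
    final_reminder_at <= now && next_reminder_at <= now
}

fn main() {
    let now = Utc::now().naive_utc();
    let initiated = now - Duration::days(6);
    // 6 of 7 wait days elapsed: the one-day-before reminder window is open,
    // but the recovery itself only unlocks tomorrow.
    assert!(reminder_due(initiated, 7, None, now));
    println!("recovery allowed at: {}", recovery_allowed_at(initiated, 7));
}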
341 src/api/core/events.rs (Normal file)
@@ -0,0 +1,341 @@
use std::net::IpAddr;

use chrono::NaiveDateTime;
use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcaseVec},
    auth::{AdminHeaders, ClientIp, Headers},
    db::{
        models::{Cipher, Event, UserOrganization},
        DbConn, DbPool,
    },
    util::parse_date,
    CONFIG,
};

/// ###############################################################################################################
/// /api routes
pub fn routes() -> Vec<Route> {
    routes![get_org_events, get_cipher_events, get_user_events,]
}

#[derive(FromForm)]
#[allow(non_snake_case)]
struct EventRange {
    start: String,
    end: String,
    #[field(name = "continuationToken")]
    continuation_token: Option<String>,
}

// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
#[get("/organizations/<org_id>/events?<data..>")]
async fn get_org_events(org_id: String, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
    // Return an empty vec when org events are disabled.
    // This prevents client errors
    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
        Vec::with_capacity(0)
    } else {
        let start_date = parse_date(&data.start);
        let end_date = if let Some(before_date) = &data.continuation_token {
            parse_date(before_date)
        } else {
            parse_date(&data.end)
        };

        Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
            .await
            .iter()
            .map(|e| e.to_json())
            .collect()
    };

    Ok(Json(json!({
        "Data": events_json,
        "Object": "list",
        "ContinuationToken": get_continuation_token(&events_json),
    })))
}

#[get("/ciphers/<cipher_id>/events?<data..>")]
async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
    // Return an empty vec when org events are disabled.
    // This prevents client errors
    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
        Vec::with_capacity(0)
    } else {
        let mut events_json = Vec::with_capacity(0);
        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
            let start_date = parse_date(&data.start);
            let end_date = if let Some(before_date) = &data.continuation_token {
                parse_date(before_date)
            } else {
                parse_date(&data.end)
            };

            events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
                .await
                .iter()
                .map(|e| e.to_json())
                .collect()
        }
        events_json
    };

    Ok(Json(json!({
        "Data": events_json,
        "Object": "list",
        "ContinuationToken": get_continuation_token(&events_json),
    })))
}

#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
async fn get_user_events(
    org_id: String,
    user_org_id: String,
    data: EventRange,
    _headers: AdminHeaders,
    mut conn: DbConn,
) -> JsonResult {
    // Return an empty vec when org events are disabled.
    // This prevents client errors
    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
        Vec::with_capacity(0)
    } else {
        let start_date = parse_date(&data.start);
        let end_date = if let Some(before_date) = &data.continuation_token {
            parse_date(before_date)
        } else {
            parse_date(&data.end)
        };

        Event::find_by_org_and_user_org(&org_id, &user_org_id, &start_date, &end_date, &mut conn)
            .await
            .iter()
            .map(|e| e.to_json())
            .collect()
    };

    Ok(Json(json!({
        "Data": events_json,
        "Object": "list",
        "ContinuationToken": get_continuation_token(&events_json),
    })))
}

fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
    // When the length of the vec equals the max page_size there probably is more data
    // When it is less, then all events are loaded.
    if events_json.len() as i64 == Event::PAGE_SIZE {
        if let Some(last_event) = events_json.last() {
            last_event["date"].as_str()
        } else {
            None
        }
    } else {
        None
    }
}

/// ###############################################################################################################
/// /events routes
pub fn main_routes() -> Vec<Route> {
    routes![post_events_collect,]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EventCollection {
    // Mandatory
    Type: i32,
    Date: String,

    // Optional
    CipherId: Option<String>,
    OrganizationId: Option<String>,
}

// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
#[post("/collect", format = "application/json", data = "<data>")]
async fn post_events_collect(
    data: JsonUpcaseVec<EventCollection>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> EmptyResult {
    if !CONFIG.org_events_enabled() {
        return Ok(());
    }

    for event in data.iter().map(|d| &d.data) {
        let event_date = parse_date(&event.Date);
        match event.Type {
            1000..=1099 => {
                _log_user_event(
                    event.Type,
                    &headers.user.uuid,
                    headers.device.atype,
                    Some(event_date),
                    &ip.ip,
                    &mut conn,
                )
                .await;
            }
            1600..=1699 => {
                if let Some(org_uuid) = &event.OrganizationId {
                    _log_event(
                        event.Type,
                        org_uuid,
                        String::from(org_uuid),
                        &headers.user.uuid,
                        headers.device.atype,
                        Some(event_date),
                        &ip.ip,
                        &mut conn,
                    )
                    .await;
                }
            }
            _ => {
                if let Some(cipher_uuid) = &event.CipherId {
                    if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
                        if let Some(org_uuid) = cipher.organization_uuid {
                            _log_event(
                                event.Type,
                                cipher_uuid,
                                org_uuid,
                                &headers.user.uuid,
                                headers.device.atype,
                                Some(event_date),
                                &ip.ip,
                                &mut conn,
                            )
                            .await;
                        }
                    }
                }
            }
        }
    }
    Ok(())
}

pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
}

async fn _log_user_event(
    event_type: i32,
    user_uuid: &str,
    device_type: i32,
    event_date: Option<NaiveDateTime>,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
    let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org

    // Upstream saves the event also without any org_uuid.
    let mut event = Event::new(event_type, event_date);
    event.user_uuid = Some(String::from(user_uuid));
    event.act_user_uuid = Some(String::from(user_uuid));
    event.device_type = Some(device_type);
    event.ip_address = Some(ip.to_string());
    events.push(event);

    // For each org a user is a member of, store these events per org
    for org_uuid in orgs {
        let mut event = Event::new(event_type, event_date);
        event.user_uuid = Some(String::from(user_uuid));
        event.org_uuid = Some(org_uuid);
        event.act_user_uuid = Some(String::from(user_uuid));
        event.device_type = Some(device_type);
        event.ip_address = Some(ip.to_string());
        events.push(event);
    }

    Event::save_user_event(events, conn).await.unwrap_or(());
}

pub async fn log_event(
    event_type: i32,
    source_uuid: &str,
    org_uuid: String,
    act_user_uuid: String,
    device_type: i32,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
}

#[allow(clippy::too_many_arguments)]
async fn _log_event(
    event_type: i32,
    source_uuid: &str,
    org_uuid: String,
    act_user_uuid: &str,
    device_type: i32,
    event_date: Option<NaiveDateTime>,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    // Create a new empty event
    let mut event = Event::new(event_type, event_date);
    match event_type {
        // 1000..=1099 Are user events, they need to be logged via log_user_event()
        // Cipher Events
        1100..=1199 => {
            event.cipher_uuid = Some(String::from(source_uuid));
        }
        // Collection Events
        1300..=1399 => {
            event.collection_uuid = Some(String::from(source_uuid));
        }
        // Group Events
        1400..=1499 => {
            event.group_uuid = Some(String::from(source_uuid));
        }
        // Org User Events
        1500..=1599 => {
            event.org_user_uuid = Some(String::from(source_uuid));
        }
        // 1600..=1699 Are organizational events, and they do not need the source_uuid
        // Policy Events
        1700..=1799 => {
            event.policy_uuid = Some(String::from(source_uuid));
        }
        // Ignore others
        _ => {}
    }

    event.org_uuid = Some(org_uuid);
    event.act_user_uuid = Some(String::from(act_user_uuid));
    event.device_type = Some(device_type);
    event.ip_address = Some(ip.to_string());
    event.save(conn).await.unwrap_or(());
}

pub async fn event_cleanup_job(pool: DbPool) {
    debug!("Start events cleanup job");
    if CONFIG.events_days_retain().is_none() {
        debug!("events_days_retain is not configured, abort");
        return;
    }

    if let Ok(mut conn) = pool.get().await {
        Event::clean_events(&mut conn).await.ok();
    } else {
        error!("Failed to get DB connection while trying to cleanup the events table")
    }
}
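get_continuation_token() above implements date-based paging: when a page comes back full (exactly Event::PAGE_SIZE entries), the date of the last event is returned as the continuation token, and the client re-queries with that date as the new end bound until a page arrives without a token. A minimal client-side sketch of that contract follows; fetch_page stands in for the HTTP call and is an assumption, not an API from this repository.

// Client-side sketch of the date-based pagination contract implemented by
// get_continuation_token(): page backwards by passing the date of the last
// event as the continuation token until a page comes back without one.
fn fetch_all_events(
    mut fetch_page: impl FnMut(Option<&str>) -> (Vec<String>, Option<String>),
) -> Vec<String> {
    let mut all = Vec::new();
    let mut token: Option<String> = None;
    loop {
        let (mut page, next) = fetch_page(token.as_deref());
        all.append(&mut page);
        match next {
            Some(t) => token = Some(t), // full page: more data is likely pending
            None => break,              // short page: everything is loaded
        }
    }
    all
}

fn main() {
    // Demo: three pages of two events each, keyed by a fake date token.
    let mut served = 0;
    let events = fetch_all_events(|_token| {
        served += 2;
        let page = vec![format!("event-{}", served - 1), format!("event-{}", served)];
        let next = if served < 6 { Some(format!("2022-12-0{}", served)) } else { None };
        (page, next)
    });
    assert_eq!(events.len(), 6);
}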
src/api/core/folders.rs
@@ -1,4 +1,4 @@
-use rocket_contrib::json::Json;
+use rocket::serde::json::Json;
 use serde_json::Value;
 
 use crate::{
@@ -12,9 +12,8 @@ pub fn routes() -> Vec<rocket::Route> {
 }
 
 #[get("/folders")]
-fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
-    let folders = Folder::find_by_user(&headers.user.uuid, &conn);
-
+async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    let folders = Folder::find_by_user(&headers.user.uuid, &mut conn).await;
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
 
     Json(json!({
@@ -25,8 +24,8 @@ fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
 }
 
 #[get("/folders/<uuid>")]
-fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn) {
+async fn get_folder(uuid: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -45,27 +44,39 @@ pub struct FolderData {
 }
 
 #[post("/folders", data = "<data>")]
-fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     let data: FolderData = data.into_inner().data;
 
     let mut folder = Folder::new(headers.user.uuid, data.Name);
 
-    folder.save(&conn)?;
-    nt.send_folder_update(UpdateType::FolderCreate, &folder);
+    folder.save(&mut conn).await?;
+    nt.send_folder_update(UpdateType::FolderCreate, &folder).await;
 
     Ok(Json(folder.to_json()))
 }
 
 #[post("/folders/<uuid>", data = "<data>")]
-fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    put_folder(uuid, data, headers, conn, nt)
+async fn post_folder(
+    uuid: String,
+    data: JsonUpcase<FolderData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    put_folder(uuid, data, headers, conn, nt).await
 }
 
 #[put("/folders/<uuid>", data = "<data>")]
-fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+async fn put_folder(
    uuid: String,
+    data: JsonUpcase<FolderData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
     let data: FolderData = data.into_inner().data;
 
-    let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
+    let mut folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -76,20 +87,20 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn
 
     folder.name = data.Name;
 
-    folder.save(&conn)?;
-    nt.send_folder_update(UpdateType::FolderUpdate, &folder);
+    folder.save(&mut conn).await?;
+    nt.send_folder_update(UpdateType::FolderUpdate, &folder).await;
 
     Ok(Json(folder.to_json()))
 }
 
 #[post("/folders/<uuid>/delete")]
-fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    delete_folder(uuid, headers, conn, nt)
+async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    delete_folder(uuid, headers, conn, nt).await
 }
 
 #[delete("/folders/<uuid>")]
-fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn) {
+async fn delete_folder(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -99,8 +110,8 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> Em
     }
 
     // Delete the actual folder entry
-    folder.delete(&conn)?;
+    folder.delete(&mut conn).await?;
 
-    nt.send_folder_update(UpdateType::FolderDelete, &folder);
+    nt.send_folder_update(UpdateType::FolderDelete, &folder).await;
     Ok(())
 }
src/api/core/mod.rs
@@ -1,25 +1,45 @@
-mod accounts;
+pub mod accounts;
 mod ciphers;
+mod emergency_access;
+mod events;
 mod folders;
 mod organizations;
 mod sends;
 pub mod two_factor;
 
 pub use ciphers::purge_trashed_ciphers;
+pub use ciphers::{CipherSyncData, CipherSyncType};
+pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
+pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
 pub use two_factor::send_incomplete_2fa_notifications;
 
 pub fn routes() -> Vec<Route> {
-    let mut mod_routes =
-        routes![clear_device_token, put_device_token, get_eq_domains, post_eq_domains, put_eq_domains, hibp_breach,];
+    let mut device_token_routes = routes![clear_device_token, put_device_token];
+    let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
+    let mut hibp_routes = routes![hibp_breach];
+    let mut meta_routes = routes![alive, now, version, config];
 
     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
     routes.append(&mut ciphers::routes());
+    routes.append(&mut emergency_access::routes());
+    routes.append(&mut events::routes());
     routes.append(&mut folders::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
     routes.append(&mut sends::routes());
-    routes.append(&mut mod_routes);
+    routes.append(&mut device_token_routes);
+    routes.append(&mut eq_domains_routes);
+    routes.append(&mut hibp_routes);
+    routes.append(&mut meta_routes);
 
     routes
 }
+
+pub fn events_routes() -> Vec<Route> {
+    let mut routes = Vec::new();
+    routes.append(&mut events::main_routes());
+
+    routes
+}
@@ -27,8 +47,9 @@ pub fn routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
+use rocket::serde::json::Json;
 use rocket::Catcher;
 use rocket::Route;
-use rocket_contrib::json::Json;
 use serde_json::Value;
 
 use crate::{
@@ -117,7 +138,7 @@ struct EquivDomainData {
 }
 
 #[post("/settings/domains", data = "<data>")]
-fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;
 
     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -129,18 +150,18 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
     user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());
 
-    user.save(&conn)?;
+    user.save(&mut conn).await?;
 
     Ok(Json(json!({})))
 }
 
 #[put("/settings/domains", data = "<data>")]
-fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_eq_domains(data, headers, conn)
+async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_eq_domains(data, headers, conn).await
 }
 
 #[get("/hibp/breach?<username>")]
-fn hibp_breach(username: String) -> JsonResult {
+async fn hibp_breach(username: String) -> JsonResult {
     let url = format!(
         "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
         username
@@ -149,14 +170,14 @@ fn hibp_breach(username: String) -> JsonResult {
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
         let hibp_client = get_reqwest_client();
 
-        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?;
+        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
 
         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {
             return Err(Error::empty().with_code(404));
         }
 
-        let value: Value = res.error_for_status()?.json()?;
+        let value: Value = res.error_for_status()?.json().await?;
         Ok(Json(value))
     } else {
         Ok(Json(json!([{
@@ -166,7 +187,7 @@ fn hibp_breach(username: String) -> JsonResult {
             "BreachDate": "2019-08-18T00:00:00Z",
             "AddedDate": "2019-08-18T00:00:00Z",
             "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
-            "LogoPath": "bwrs_static/hibp.png",
+            "LogoPath": "vw_static/hibp.png",
             "PwnCount": 0,
             "DataClasses": [
                 "Error - No API key set!"
@@ -174,3 +195,54 @@ fn hibp_breach(username: String) -> JsonResult {
         }])))
     }
 }
+
+// We use DbConn here to let the alive healthcheck also verify the database connection.
+#[get("/alive")]
+fn alive(_conn: DbConn) -> Json<String> {
+    now()
+}
+
+#[get("/now")]
+pub fn now() -> Json<String> {
+    Json(crate::util::format_date(&chrono::Utc::now().naive_utc()))
+}
+
+#[get("/version")]
+fn version() -> Json<&'static str> {
+    Json(crate::VERSION.unwrap_or_default())
+}
+
+#[get("/config")]
+fn config() -> Json<Value> {
+    let domain = crate::CONFIG.domain();
+    Json(json!({
+        "version": crate::VERSION,
+        "gitHash": option_env!("GIT_REV"),
+        "server": {
+            "name": "Vaultwarden",
+            "url": "https://github.com/dani-garcia/vaultwarden"
+        },
+        "environment": {
+            "vault": domain,
+            "api": format!("{domain}/api"),
+            "identity": format!("{domain}/identity"),
+            "notifications": format!("{domain}/notifications"),
+            "sso": "",
+        },
+    }))
+}
+
+pub fn catchers() -> Vec<Catcher> {
+    catchers![api_not_found]
+}
+
+#[catch(404)]
+fn api_not_found() -> Json<Value> {
+    Json(json!({
+        "error": {
+            "code": 404,
+            "reason": "Not Found",
+            "description": "The requested resource could not be found."
+        }
+    }))
+}
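The alive, now, version, and config handlers added at the end of this diff are plain GETs, so they double as a quick smoke test for a deployment. The following sketch assumes a Vaultwarden instance at http://localhost:8000 with these routes mounted under /api, plus the reqwest crate (blocking and json features) and serde_json; all of those are assumptions about the runtime environment, not facts from the diff.

// Smoke test for the meta endpoints above; /alive also exercises the DB,
// because the handler takes a DbConn guard.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // /alive returns the current server time as a JSON string.
    let alive: String = reqwest::blocking::get("http://localhost:8000/api/alive")?.json()?;
    println!("alive at {alive}");

    // /config reports the vault URL layout clients should use.
    let config: serde_json::Value = reqwest::blocking::get("http://localhost:8000/api/config")?.json()?;
    println!("identity endpoint: {}", config["environment"]["identity"]);
    Ok(())
}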
File diff suppressed because it is too large
@@ -1,22 +1,29 @@
|
||||
use std::{io::Read, path::Path};
|
||||
use std::path::Path;
|
||||
|
||||
use chrono::{DateTime, Duration, Utc};
|
||||
use multipart::server::{save::SavedData, Multipart, SaveResult};
|
||||
use rocket::{http::ContentType, response::NamedFile, Data};
|
||||
use rocket_contrib::json::Json;
|
||||
use rocket::form::Form;
|
||||
use rocket::fs::NamedFile;
|
||||
use rocket::fs::TempFile;
|
||||
use rocket::serde::json::Json;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{
|
||||
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
|
||||
auth::{Headers, Host},
|
||||
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
|
||||
auth::{ClientIp, Headers, Host},
|
||||
db::{models::*, DbConn, DbPool},
|
||||
util::SafeString,
|
||||
CONFIG,
|
||||
};
|
||||
|
||||
const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
|
||||
|
||||
// The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
|
||||
const SIZE_525_MB: u64 = 550_502_400;
|
||||
|
||||
pub fn routes() -> Vec<rocket::Route> {
|
||||
routes![
|
||||
get_sends,
|
||||
get_send,
|
||||
post_send,
|
||||
post_send_file,
|
||||
post_access,
|
||||
@@ -24,14 +31,16 @@ pub fn routes() -> Vec<rocket::Route> {
|
||||
put_send,
|
||||
delete_send,
|
||||
put_remove_password,
|
||||
download_send
|
||||
download_send,
|
||||
post_send_file_v2,
|
||||
post_send_file_v2_data
|
||||
]
|
||||
}
|
||||
|
||||
pub fn purge_sends(pool: DbPool) {
|
||||
pub async fn purge_sends(pool: DbPool) {
|
||||
debug!("Purging sends");
|
||||
if let Ok(conn) = pool.get() {
|
||||
Send::purge(&conn);
|
||||
if let Ok(mut conn) = pool.get().await {
|
||||
Send::purge(&mut conn).await;
|
||||
} else {
|
||||
error!("Failed to get DB connection while purging sends")
|
||||
}
|
||||
@@ -39,21 +48,22 @@ pub fn purge_sends(pool: DbPool) {
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
pub struct SendData {
|
||||
pub Type: i32,
|
||||
pub Key: String,
|
||||
pub Password: Option<String>,
|
||||
pub MaxAccessCount: Option<i32>,
|
||||
pub ExpirationDate: Option<DateTime<Utc>>,
|
||||
pub DeletionDate: DateTime<Utc>,
|
||||
pub Disabled: bool,
|
||||
pub HideEmail: Option<bool>,
|
||||
struct SendData {
|
||||
Type: i32,
|
||||
Key: String,
|
||||
Password: Option<String>,
|
||||
MaxAccessCount: Option<NumberOrString>,
|
||||
ExpirationDate: Option<DateTime<Utc>>,
|
||||
DeletionDate: DateTime<Utc>,
|
||||
Disabled: bool,
|
||||
HideEmail: Option<bool>,
|
||||
|
||||
// Data field
|
||||
pub Name: String,
|
||||
pub Notes: Option<String>,
|
||||
pub Text: Option<Value>,
|
||||
pub File: Option<Value>,
|
||||
Name: String,
|
||||
Notes: Option<String>,
|
||||
Text: Option<Value>,
|
||||
File: Option<Value>,
|
||||
FileLength: Option<NumberOrString>,
|
||||
}
|
||||
|
||||
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
|
||||
@@ -64,10 +74,11 @@ pub struct SendData {
|
||||
///
|
||||
/// There is also a Vaultwarden-specific `sends_allowed` config setting that
|
||||
/// controls this policy globally.
|
||||
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
|
||||
async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult {
|
||||
let user_uuid = &headers.user.uuid;
|
||||
let policy_type = OrgPolicyType::DisableSend;
|
||||
if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
|
||||
if !CONFIG.sends_allowed()
|
||||
|| OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await
|
||||
{
|
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }

    Ok(())
@@ -79,10 +90,10 @@ fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult
/// but is allowed to remove this option from an existing Send.
///
/// Ref: https://bitwarden.com/help/article/policies/#send-options
fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let hide_email = data.HideEmail.unwrap_or(false);
    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn) {
    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
        err!(
            "Due to an Enterprise Policy, you are not allowed to hide your email address \
            from recipients when creating or editing a Send."
@@ -116,7 +127,10 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
    send.user_uuid = Some(user_uuid);
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.max_access_count = match data.MaxAccessCount {
        Some(m) => Some(m.into_i32()?),
        _ => None,
    };
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.hide_email = data.HideEmail;
@@ -127,108 +141,234 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    Ok(send)
}

#[get("/sends")]
async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
    let sends = Send::find_by_user(&headers.user.uuid, &mut conn);
    let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();

    Json(json!({
        "Data": sends_json,
        "Object": "list",
        "ContinuationToken": null
    }))
}

#[get("/sends/<uuid>")]
async fn get_send(uuid: String, headers: Headers, mut conn: DbConn) -> JsonResult {
    let send = match Send::find_by_uuid(&uuid, &mut conn).await {
        Some(send) => send,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    Ok(Json(send.to_json()))
}

#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;
    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
    }

    let mut send = create_send(data, headers.user.uuid.clone())?;
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);
    let mut send = create_send(data, headers.user.uuid)?;
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

#[derive(FromForm)]
struct UploadData<'f> {
    model: Json<crate::util::UpCase<SendData>>,
    data: TempFile<'f>,
}

#[derive(FromForm)]
struct UploadDataV2<'f> {
    data: TempFile<'f>,
}

// @deprecated Mar 25 2021: This method has been deprecated in favor of direct uploads (v2).
// This method still exists to support older clients; it probably needs to be removed at some point.
// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L164-L167
#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let boundary = content_type.params().next().expect("No boundary provided").1;
    let UploadData {
        model,
        mut data,
    } = data.into_inner();
    let model = model.into_inner().data;

    let mut mpart = Multipart::with_body(data.open(), boundary);

    // First entry is the SendData JSON
    let mut model_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "model" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
    enforce_disable_hide_email_policy(&data.data, &headers, &conn)?;

    // Get the file length and add an extra 10% to avoid issues
    const SIZE_110_MB: u64 = 115_343_360;
    enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
            if left <= 0 {
                err!("Attachment size limit reached! Delete some files to open space")
                err!("Attachment storage limit reached! Delete some attachments to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_110_MB)
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
        }
        None => SIZE_110_MB,
        None => SIZE_525_MB,
    };
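    // A minimal sketch, separate from the diff: the quota computation above in
    // isolation. The 525 MB constant mirrors the SIZE_525_MB constant defined
    // elsewhere in the codebase; the helper name and signature are assumptions.
    fn remaining_upload_budget(limit_kb: Option<i64>, used_bytes: i64) -> Result<u64, &'static str> {
        const SIZE_525_MB: u64 = 550_502_400; // 525 * 1024 * 1024
        match limit_kb {
            Some(0) => Err("File uploads are disabled"),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - used_bytes;
                if left <= 0 {
                    return Err("Attachment storage limit reached");
                }
                // The handler caps the reader at the larger of the remaining
                // quota and the default Send size limit, mirroring the code above.
                Ok(std::cmp::Ord::max(left as u64, SIZE_525_MB))
            }
            None => Ok(SIZE_525_MB),
        }
    }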

    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid.clone())?;
    let file_id = crate::crypto::generate_send_id();

    let mut send = create_send(model, headers.user.uuid)?;
    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);
    // There is a bug regarding uploading attachments/sends using the Mobile clients
    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
    // On the vaultwarden side this is temporarily fixed by using a custom multer library
    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
    if let TempFile::Buffered {
        content: _,
    } = &data
    {
        err!("Error reading send file data. Please try another client.");
    }

    // Read the data entry and save the file
    let mut data_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "data" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };
    let size = data.len();
    if size > size_limit {
        err!("Attachment storage limit exceeded with this file");
    }

    let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment is not a file: {:?}", other));
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Error: {:?}", e));
        }
    };
    let file_id = crate::crypto::generate_send_id();
    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
    let file_path = folder_path.join(&file_id);
    tokio::fs::create_dir_all(&folder_path).await?;

    if let Err(_err) = data.persist_to(&file_path).await {
        data.move_copy_to(file_path).await?
    }
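    // A minimal sketch, separate from the diff: the persist-or-copy fallback in
    // isolation, assuming Rocket 0.5's rocket::fs::TempFile. persist_to renames
    // the temp file, which can fail when the destination is on a different
    // filesystem, so move_copy_to is used as a fallback that copies the
    // contents instead. The helper name is an assumption.
    async fn store_temp_file(file: &mut rocket::fs::TempFile<'_>, dest: &std::path::Path) -> std::io::Result<()> {
        if file.persist_to(dest).await.is_err() {
            file.move_copy_to(dest).await?;
        }
        Ok(())
    }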

    // Set ID and sizes
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
    }
    send.data = serde_json::to_string(&data_value)?;

    // Save the changes in the database
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
#[post("/sends/file/v2", data = "<data>")]
async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let data = data.into_inner().data;

    if data.Type != SendType::File as i32 {
        err!("Send content is not a file");
    }

    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    let file_length = match &data.FileLength {
        Some(m) => Some(m.into_i32()?),
        _ => None,
    };

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
            if left <= 0 {
                err!("Attachment storage limit reached! Delete some attachments to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
        }
        None => SIZE_525_MB,
    };

    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
        err!("Attachment storage limit exceeded with this file");
    }

    let mut send = create_send(data, headers.user.uuid)?;

    let file_id = crate::crypto::generate_send_id();

    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id.clone()));
        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
    }
    send.data = serde_json::to_string(&data_value)?;
    send.save(&mut conn).await?;

    Ok(Json(json!({
        "fileUploadType": 0, // 0 == Direct | 1 == Azure
        "object": "send-fileUpload",
        "url": format!("/sends/{}/file/{}", send.uuid, file_id),
        "sendResponse": send.to_json()
    })))
}
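// A minimal sketch, separate from the diff: what a client-side round trip of
// the two-step v2 flow above could look like, assuming reqwest with the json
// and multipart features. The endpoint paths come from the handlers in this
// file; the base URL, bearer token and helper name are assumptions.
async fn upload_send_v2(base: &str, token: &str, meta: serde_json::Value, bytes: Vec<u8>) -> reqwest::Result<()> {
    use reqwest::multipart::{Form, Part};
    let client = reqwest::Client::new();

    // Step 1: create the Send metadata and receive the direct-upload URL.
    let resp: serde_json::Value = client
        .post(format!("{base}/api/sends/file/v2"))
        .bearer_auth(token)
        .json(&meta)
        .send()
        .await?
        .json()
        .await?;
    let url = resp["url"].as_str().unwrap_or_default();

    // Step 2: POST the encrypted payload as the multipart "data" entry.
    let form = Form::new().part("data", Part::bytes(bytes).file_name("blob"));
    client.post(format!("{base}/api{url}")).bearer_auth(token).multipart(form).send().await?.error_for_status()?;
    Ok(())
}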

// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
async fn post_send_file_v2_data(
    send_uuid: String,
    file_id: String,
    data: Form<UploadDataV2<'_>>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let mut data = data.into_inner();

    // There is a bug regarding uploading attachments/sends using the Mobile clients
    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
    // On the vaultwarden side this is temporarily fixed by using a custom multer library
    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
    if let TempFile::Buffered {
        content: _,
    } = &data.data
    {
        err!("Error reading attachment data. Please try another client.");
    }

    if let Some(send) = Send::find_by_uuid(&send_uuid, &mut conn).await {
        let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send_uuid);
        let file_path = folder_path.join(&file_id);
        tokio::fs::create_dir_all(&folder_path).await?;

        if let Err(_err) = data.data.persist_to(&file_path).await {
            data.data.move_copy_to(file_path).await?
        }

        nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;
    } else {
        err!("Send not found. Unable to save the file.");
    }

    Ok(())
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
@@ -236,8 +376,13 @@ pub struct SendAccessData {
}

#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &conn) {
async fn post_access(
    access_id: String,
    data: JsonUpcase<SendAccessData>,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &mut conn).await {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };
@@ -265,8 +410,8 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
            Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
            None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
        }
    }

@@ -275,20 +420,20 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
        send.access_count += 1;
    }

    send.save(&conn)?;
    send.save(&mut conn).await?;

    Ok(Json(send.to_json_access(&conn)))
    Ok(Json(send.to_json_access(&mut conn).await))
}

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
async fn post_access_file(
    send_id: String,
    file_id: String,
    data: JsonUpcase<SendAccessData>,
    host: Host,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &conn) {
    let mut send = match Send::find_by_uuid(&send_id, &mut conn).await {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };
@@ -323,7 +468,7 @@ fn post_access_file(

    send.access_count += 1;

    send.save(&conn)?;
    send.save(&mut conn).await?;

    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
    let token = crate::auth::encode_jwt(&token_claims);
@@ -335,23 +480,29 @@ fn post_access_file(
}

#[get("/sends/<send_id>/<file_id>?<t>")]
fn download_send(send_id: String, file_id: String, t: String) -> Option<NamedFile> {
async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
    if let Ok(claims) = crate::auth::decode_send(&t) {
        if claims.sub == format!("{}/{}", send_id, file_id) {
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok();
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
        }
    }
    None
}
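// A minimal sketch, separate from the diff: the token gate used by
// download_send above. The download token's `sub` claim must equal
// "<send_id>/<file_id>", so a token minted for one file cannot be replayed
// against another. The struct mirrors the shape implied by
// generate_send_claims; the field names here are assumptions.
#[derive(serde::Serialize, serde::Deserialize)]
struct SendFileClaims {
    sub: String, // "<send_id>/<file_id>"
    exp: i64,    // expiry timestamp, checked during JWT decoding
}

fn claims_match(claims: &SendFileClaims, send_id: &str, file_id: &str) -> bool {
    claims.sub == format!("{}/{}", send_id, file_id)
}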

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn put_send(
    id: String,
    data: JsonUpcase<SendData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;
    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
    let mut send = match Send::find_by_uuid(&id, &mut conn).await {
        Some(s) => s,
        None => err!("Send not found"),
    };
@@ -385,7 +536,10 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.max_access_count = match data.MaxAccessCount {
        Some(m) => Some(m.into_i32()?),
        _ => None,
    };
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.hide_email = data.HideEmail;
    send.disabled = data.Disabled;
@@ -395,15 +549,15 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
        send.set_password(Some(&password));
    }

    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &conn) {
async fn delete_send(id: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &mut conn).await {
        Some(s) => s,
        None => err!("Send not found"),
    };
@@ -412,17 +566,17 @@ fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyR
        err!("Send is not owned by user")
    }

    send.delete(&conn)?;
    nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);
    send.delete(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(())
}

#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn put_remove_password(id: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
    let mut send = match Send::find_by_uuid(&id, &mut conn).await {
        Some(s) => s,
        None => err!("Send not found"),
    };
@@ -432,8 +586,8 @@ fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -
    }

    send.set_password(None);
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

@@ -1,15 +1,16 @@
use data_encoding::BASE32;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
        NumberOrString, PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
};
@@ -21,7 +22,7 @@ pub fn routes() -> Vec<Route> {
}

#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;
    let user = headers.user;

@@ -30,11 +31,11 @@ fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn
    }

    let type_ = TwoFactorType::Authenticator as i32;
    let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
    let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;

    let (enabled, key) = match twofactor {
        Some(tf) => (true, tf.data),
        _ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
        _ => (false, crypto::encode_random_bytes::<20>(BASE32)),
    };

    Ok(Json(json!({
@@ -53,16 +54,16 @@ struct EnableAuthenticatorData {
}

#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(
async fn activate_authenticator(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    let data: EnableAuthenticatorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let key = data.Key;
    let token = data.Token.into_i32()? as u64;
    let token = data.Token.into_string();

    let mut user = headers.user;

@@ -81,9 +82,11 @@ fn activate_authenticator(
    }

    // Validate the token provided with the key, and save new twofactor
    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &ip, &conn)?;
    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Enabled": true,
@@ -93,77 +96,94 @@ fn activate_authenticator(
}

#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(
async fn activate_authenticator_put(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    activate_authenticator(data, headers, ip, conn)
    activate_authenticator(data, headers, ip, conn).await
}

pub fn validate_totp_code_str(
pub async fn validate_totp_code_str(
    user_uuid: &str,
    totp_code: &str,
    secret: &str,
    ip: &ClientIp,
    conn: &DbConn,
    conn: &mut DbConn,
) -> EmptyResult {
    let totp_code: u64 = match totp_code.parse() {
        Ok(code) => code,
        _ => err!("TOTP code is not a number"),
    };
    if !totp_code.chars().all(char::is_numeric) {
        err!("TOTP code is not a number");
    }

    validate_totp_code(user_uuid, totp_code, secret, ip, conn)
    validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
}

pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
    use oath::{totp_raw_custom_time, HashType};
pub async fn validate_totp_code(
    user_uuid: &str,
    totp_code: &str,
    secret: &str,
    ip: &ClientIp,
    conn: &mut DbConn,
) -> EmptyResult {
    use totp_lite::{totp_custom, Sha1};

    let decoded_secret = match BASE32.decode(secret.as_bytes()) {
        Ok(s) => s,
        Err(_) => err!("Invalid TOTP secret"),
    };

    let mut twofactor = match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn) {
        Some(tf) => tf,
        _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
    };
    let mut twofactor =
        match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
            Some(tf) => tf,
            _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
        };

    // The amount of steps back and forward in time
    // Also check if we need to disable time drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
    let steps = i64::from(!CONFIG.authenticator_disable_time_drift());

    // Get the current system time in UNIX Epoch (UTC)
    let current_time = chrono::Utc::now();
    let current_timestamp = current_time.timestamp();

    // The amount of steps back and forward in time
    // Also check if we need to disable time drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
    let steps = !CONFIG.authenticator_disable_time_drift() as i64;

    for step in -steps..=steps {
        let time_step = current_timestamp / 30i64 + step;
        // We need to calculate the time offset and cast it as an i128.
        // Else we can't do math with it on a default u64 variable.

        // We need to calculate the time offset and cast it as a u64,
        // since we only have times into the future and the totp generator needs a u64 instead of the default i64.
        let time = (current_timestamp + step * 30i64) as u64;
        let generated = totp_raw_custom_time(&decoded_secret, 6, 0, 30, time, &HashType::SHA1);
        let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);

        // Check that the given code equals the generated one and that the time_step is larger than the last one used.
        if generated == totp_code && time_step > twofactor.last_used as i64 {
        if generated == totp_code && time_step > i64::from(twofactor.last_used) {
            // If the step does not equal 0, the time has drifted on either the server or the client side.
            if step != 0 {
                info!("TOTP Time drift detected. The step offset is {}", step);
                warn!("TOTP Time drift detected. The step offset is {}", step);
            }

            // Save the last used time step so only totp time steps higher than this one are allowed.
            // This will also save a newly created twofactor if the code is correct.
            twofactor.last_used = time_step as i32;
            twofactor.save(conn)?;
            twofactor.save(conn).await?;
            return Ok(());
        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
            warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
            err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
        } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
            warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
            err!(
                format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
                ErrorEvent {
                    event: EventType::UserFailedLogIn2fa
                }
            );
        }
    }

    // Else no valid code was received, deny access
    err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
    err!(
        format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
        ErrorEvent {
            event: EventType::UserFailedLogIn2fa
        }
    );
}
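// A minimal sketch, separate from the diff: the totp_lite call above in
// isolation, checking a code against the current 30-second step plus one step
// of drift in each direction, matching the 6-digit SHA-1 parameters used by
// the handler. The helper name is an assumption.
fn totp_code_matches(decoded_secret: &[u8], code: &str, current_timestamp: i64) -> bool {
    use totp_lite::{totp_custom, Sha1};
    (-1..=1).any(|step| {
        let time = (current_timestamp + step * 30) as u64;
        totp_custom::<Sha1>(30, 6, decoded_secret, time) == code
    })
}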

@@ -1,14 +1,17 @@
use chrono::Utc;
use data_encoding::BASE64;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
    api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    api::{
        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
        PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType, User},
        models::{EventType, TwoFactor, TwoFactorType, User},
        DbConn,
    },
    error::MapResult,
@@ -89,14 +92,14 @@ impl DuoStatus {
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

#[post("/two-factor/get-duo", data = "<data>")]
fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let data = get_user_duo_data(&headers.user.uuid, &conn);
    let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;

    let (enabled, data) = match data {
        DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -152,7 +155,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
}

#[post("/two-factor/duo", data = "<data>")]
fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
    let data: EnableDuoData = data.into_inner().data;
    let mut user = headers.user;

@@ -163,7 +166,7 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
    let (data, data_str) = if check_duo_fields_custom(&data) {
        let data_req: DuoData = data.into();
        let data_str = serde_json::to_string(&data_req)?;
        duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?;
        duo_api_request("GET", "/auth/v2/check", "", &data_req).await.map_res("Failed to validate Duo credentials")?;
        (data_req.obscure(), data_str)
    } else {
        (DuoData::secret(), String::new())
@@ -171,9 +174,11 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)

    let type_ = TwoFactorType::Duo;
    let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str);
    twofactor.save(&conn)?;
    twofactor.save(&mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Enabled": true,
@@ -185,11 +190,11 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
}

#[put("/two-factor/duo", data = "<data>")]
fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_duo(data, headers, conn)
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn, ip: ClientIp) -> JsonResult {
    activate_duo(data, headers, conn, ip).await
}

fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
    use reqwest::{header, Method};
    use std::str::FromStr;

@@ -209,7 +214,8 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em
        .basic_auth(username, Some(password))
        .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
        .header(header::DATE, date)
        .send()?
        .send()
        .await?
        .error_for_status()?;

    Ok(())
@@ -222,11 +228,11 @@ const AUTH_PREFIX: &str = "AUTH";
const DUO_PREFIX: &str = "TX";
const APP_PREFIX: &str = "APP";

fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
    let type_ = TwoFactorType::Duo as i32;

    // If the user doesn't have an entry, disabled
    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn) {
    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
        Some(t) => t,
        None => return DuoStatus::Disabled(DuoData::global().is_some()),
    };
@@ -246,19 +252,20 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
}

// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
    let data = User::find_by_mail(email, conn)
        .and_then(|u| get_user_duo_data(&u.uuid, conn).data())
        .or_else(DuoData::global)
        .map_res("Can't fetch Duo keys")?;
async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
    let data = match User::find_by_mail(email, conn).await {
        Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
        _ => DuoData::global(),
    }
    .map_res("Can't fetch Duo Keys")?;

    Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
}

pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {
    let now = Utc::now().timestamp();

    let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?;
    let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?;

    let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
    let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
@@ -273,14 +280,19 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
    format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
}
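// A minimal sketch, separate from the diff: the shape of a signed Duo value,
// assuming the classic Duo Web v2 scheme implied above: a base64 payload of
// "email|ikey|expire" behind a prefix, followed by an HMAC over the whole
// cookie. The free function and the `hmac` closure argument are assumptions
// standing in for crypto::hmac_sign.
fn sign_value(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64, hmac: impl Fn(&str, &str) -> String) -> String {
    use data_encoding::BASE64;
    let value = BASE64.encode(format!("{}|{}|{}", email, ikey, expire).as_bytes());
    let cookie = format!("{}|{}", prefix, value);
    format!("{}|{}", cookie, hmac(key, &cookie))
}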

pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
    // email is as entered by the user, so it needs to be normalized before
    // comparison with auth_user below.
    let email = &email.to_lowercase();

    let split: Vec<&str> = response.split(':').collect();
    if split.len() != 2 {
        err!("Invalid response length");
        err!(
            "Invalid response length",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        );
    }

    let auth_sig = split[0];
@@ -288,13 +300,18 @@ pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyRe

    let now = Utc::now().timestamp();

    let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?;
    let (ik, sk, ak, _host) = get_duo_keys_email(email, conn).await?;

    let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?;
    let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;

    if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) {
        err!("Error validating duo authentication")
        err!(
            "Error validating duo authentication",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        )
    }

    Ok(())
@@ -1,13 +1,16 @@
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    api::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
@@ -28,13 +31,13 @@ struct SendEmailLoginData {
/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
    let data: SendEmailLoginData = data.into_inner().data;

    use crate::db::models::User;

    // Get the user
    let user = match User::find_by_mail(&data.Email, &conn) {
    let user = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => user,
        None => err!("Username or password is incorrect. Try again."),
    };
@@ -48,31 +51,32 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
        err!("Email 2FA is disabled")
    }

    send_token(&user.uuid, &conn)?;
    send_token(&user.uuid, &mut conn).await?;

    Ok(())
}

/// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
    let type_ = TwoFactorType::Email as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?;
    let mut twofactor =
        TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?;

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());

    let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
    twofactor_data.set_token(generated_token);
    twofactor.data = twofactor_data.to_json();
    twofactor.save(conn)?;
    twofactor.save(conn).await?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?;

    Ok(())
}

/// When user clicks on Manage email 2FA show the user the related information
#[post("/two-factor/get-email", data = "<data>")]
fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;
    let user = headers.user;

@@ -80,14 +84,17 @@ fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
        err!("Invalid password");
    }

    let type_ = TwoFactorType::Email as i32;
    let enabled = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        Some(x) => x.enabled,
        _ => false,
    };
    let (enabled, mfa_email) =
        match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
            Some(x) => {
                let twofactor_data = EmailTokenData::from_json(&x.data)?;
                (true, json!(twofactor_data.email))
            }
            _ => (false, json!(null)),
        };

    Ok(Json(json!({
        "Email": user.email,
        "Email": mfa_email,
        "Enabled": enabled,
        "Object": "twoFactorEmail"
    })))
@@ -103,7 +110,7 @@ struct SendEmailData {

/// Send a verification email to the specified email address to check whether it exists/belongs to user.
#[post("/two-factor/send-email", data = "<data>")]
fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: SendEmailData = data.into_inner().data;
    let user = headers.user;

@@ -117,18 +124,18 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -

    let type_ = TwoFactorType::Email as i32;

    if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        tf.delete(&conn)?;
    if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
        tf.delete(&mut conn).await?;
    }

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
    let twofactor_data = EmailTokenData::new(data.Email, generated_token);

    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
    twofactor.save(&conn)?;
    twofactor.save(&mut conn).await?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?;

    Ok(())
}
@@ -143,7 +150,7 @@ struct EmailData {

/// Verify email belongs to user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
    let data: EmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -152,7 +159,8 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
    }

    let type_ = TwoFactorType::EmailVerificationChallenge as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;
    let mut twofactor =
        TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await.map_res("Two factor not found")?;

    let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -168,9 +176,11 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
    email_data.reset_token();
    twofactor.atype = TwoFactorType::Email as i32;
    twofactor.data = email_data.to_json();
    twofactor.save(&conn)?;
    twofactor.save(&mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Email": email_data.email,
@@ -180,13 +190,19 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
}

/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
        .await
        .map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),
        _ => err!(
            "No token available",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        ),
    };

    if !crypto::ct_eq(issued_token, token) {
@@ -195,23 +211,34 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
            email_data.reset_token();
        }
        twofactor.data = email_data.to_json();
        twofactor.save(conn)?;
        twofactor.save(conn).await?;

        err!("Token is invalid")
        err!(
            "Token is invalid",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        )
    }

    email_data.reset_token();
    twofactor.data = email_data.to_json();
    twofactor.save(conn)?;
    twofactor.save(conn).await?;

    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
    let max_time = CONFIG.email_expiration_time() as i64;
    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
        err!("Token has expired")
        err!(
            "Token has expired",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        )
    }

    Ok(())
}
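// A minimal sketch, separate from the diff: the expiry rule above on its own.
// A token issued at `token_sent` (UNIX seconds) stays valid for `max_secs`.
// The helper name is an assumption.
fn email_token_expired(token_sent: i64, max_secs: i64) -> bool {
    use chrono::{Duration, NaiveDateTime, Utc};
    match NaiveDateTime::from_timestamp_opt(token_sent, 0) {
        Some(sent) => sent + Duration::seconds(max_secs) < Utc::now().naive_utc(),
        None => true, // treat an unparseable timestamp as expired
    }
}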

/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize)]
pub struct EmailTokenData {
@@ -307,18 +334,4 @@ mod tests {
        // If it's smaller than 3 characters it should only show asterisks.
        assert_eq!(result, "***@example.ext");
    }

    #[test]
    fn test_token() {
        let result = crypto::generate_token(19).unwrap();

        assert_eq!(result.chars().count(), 19);
    }

    #[test]
    fn test_token_too_large() {
        let result = crypto::generate_token(20);

        assert!(result.is_err(), "too large token should give an error");
    }
}
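// A minimal sketch, separate from the diff: the kind of masking the remaining
// test exercises. The helper name and exact rules here are assumptions; only
// the "***@example.ext" expectation for short local parts comes from the test.
fn obscure_email(email: &str) -> String {
    let (local, domain) = email.split_once('@').unwrap_or((email, ""));
    let len = local.chars().count();
    let masked = if len <= 3 {
        "*".repeat(len) // short local parts become only asterisks
    } else {
        // keep the first two characters, mask the rest
        format!("{}{}", local.chars().take(2).collect::<String>(), "*".repeat(len - 2))
    };
    format!("{}@{}", masked, domain)
}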

@@ -1,32 +1,36 @@
use chrono::{Duration, Utc};
use data_encoding::BASE32;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::Headers,
    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::{ClientHeaders, ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, User},
        DbConn,
    },
    db::{models::*, DbConn, DbPool},
    mail, CONFIG,
};

pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod webauthn;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
    let mut routes = routes![get_twofactor, get_recover, recover, disable_twofactor, disable_twofactor_put,];
    let mut routes = routes![
        get_twofactor,
        get_recover,
        recover,
        disable_twofactor,
        disable_twofactor_put,
        get_device_verification_settings,
    ];

    routes.append(&mut authenticator::routes());
    routes.append(&mut duo::routes());
    routes.append(&mut email::routes());
    routes.append(&mut u2f::routes());
    routes.append(&mut webauthn::routes());
    routes.append(&mut yubikey::routes());

@@ -34,8 +38,8 @@ pub fn routes() -> Vec<Route> {
}

#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &mut conn).await;
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

    Json(json!({
@@ -69,13 +73,18 @@ struct RecoverTwoFactor {
}

#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
async fn recover(
    data: JsonUpcase<RecoverTwoFactor>,
    client_headers: ClientHeaders,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let data: RecoverTwoFactor = data.into_inner().data;

    use crate::db::models::User;

    // Get the user
    let mut user = match User::find_by_mail(&data.Email, &conn) {
    let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => user,
        None => err!("Username or password is incorrect. Try again."),
    };
@@ -91,19 +100,21 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
    }

    // Remove all twofactors from the user
    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
    TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;

    log_user_event(EventType::UserRecovered2fa as i32, &user.uuid, client_headers.device_type, &ip.ip, &mut conn).await;

    // Remove the recovery code, not needed without twofactors
    user.totp_recover = None;
    user.save(&conn)?;
    user.save(&mut conn).await?;
    Ok(Json(json!({})))
}

fn _generate_recover_code(user: &mut User, conn: &DbConn) {
async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
    if user.totp_recover.is_none() {
        let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
        let totp_recover = crypto::encode_random_bytes::<20>(BASE32);
        user.totp_recover = Some(totp_recover);
        user.save(conn).ok();
        user.save(conn).await.ok();
    }
}

@@ -115,7 +126,12 @@ struct DisableTwoFactorData {
}

#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn disable_twofactor(
    data: JsonUpcase<DisableTwoFactorData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let data: DisableTwoFactorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let user = headers.user;
@@ -126,8 +142,27 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c

    let type_ = data.Type.into_i32()?;

    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        twofactor.delete(&conn)?;
    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
        twofactor.delete(&mut conn).await?;
        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    }

    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();

    if twofactor_disabled {
        for user_org in
            UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
                .await
                .into_iter()
        {
            if user_org.atype < UserOrgType::Admin {
                if CONFIG.mail_enabled() {
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
                    mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
                }
                user_org.delete(&mut conn).await?;
            }
        }
    }

    Ok(Json(json!({
@@ -138,6 +173,61 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c
}

#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
    disable_twofactor(data, headers, conn)
async fn disable_twofactor_put(
    data: JsonUpcase<DisableTwoFactorData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    disable_twofactor(data, headers, conn, ip).await
}

pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
    debug!("Sending notifications for incomplete 2FA logins");

    if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
        return;
    }

    let mut conn = match pool.get().await {
        Ok(conn) => conn,
        _ => {
            error!("Failed to get DB connection in send_incomplete_2fa_notifications()");
            return;
        }
    };

    let now = Utc::now().naive_utc();
    let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
    let time_before = now - time_limit;
    let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
    for login in incomplete_logins {
        let user = User::find_by_uuid(&login.user_uuid, &mut conn).await.expect("User not found");
        info!(
            "User {} did not complete a 2FA login within the configured time limit. IP: {}",
            user.email, login.ip_address
        );
        mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
            .await
            .expect("Error sending incomplete 2FA email");
        login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record");
    }
}
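// A minimal sketch, separate from the diff: the cutoff computed above, in
// isolation. Logins started before `now - limit` count as incomplete and are
// notified. The helper name is an assumption.
fn incomplete_2fa_cutoff(limit_minutes: i64) -> chrono::NaiveDateTime {
    chrono::Utc::now().naive_utc() - chrono::Duration::minutes(limit_minutes)
}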
|
||||
|
||||
// This function currently is just a dummy and the actual part is not implemented yet.
|
||||
// This also prevents 404 errors.
|
||||
//
|
||||
// See the following Bitwarden PR's regarding this feature.
|
||||
// https://github.com/bitwarden/clients/pull/2843
|
||||
// https://github.com/bitwarden/clients/pull/2839
|
||||
// https://github.com/bitwarden/server/pull/2016
|
||||
//
|
||||
// The HTML part is hidden via the CSS patches done via the bw_web_build repo
|
||||
#[get("/two-factor/get-device-verification-settings")]
|
||||
fn get_device_verification_settings(_headers: Headers, _conn: DbConn) -> Json<Value> {
|
||||
Json(json!({
|
||||
"isDeviceVerificationSectionEnabled":false,
|
||||
"unknownDeviceVerificationEnabled":false,
|
||||
"object":"deviceVerificationSettings"
|
||||
}))
|
||||
}
|
||||
|
@@ -1,352 +0,0 @@
|
||||
use once_cell::sync::Lazy;
|
||||
use rocket::Route;
|
||||
use rocket_contrib::json::Json;
|
||||
use serde_json::Value;
|
||||
use u2f::{
|
||||
messages::{RegisterResponse, SignResponse, U2fSignRequest},
|
||||
protocol::{Challenge, U2f},
|
||||
register::Registration,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
api::{
|
||||
core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
|
||||
PasswordData,
|
||||
},
|
||||
auth::Headers,
|
||||
db::{
|
||||
models::{TwoFactor, TwoFactorType},
|
||||
DbConn,
|
||||
},
|
||||
error::Error,
|
||||
CONFIG,
|
||||
};
|
||||
|
||||
const U2F_VERSION: &str = "U2F_V2";
|
||||
|
||||
static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.domain()));
|
||||
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
routes![generate_u2f, generate_u2f_challenge, activate_u2f, activate_u2f_put, delete_u2f,]
|
||||
}
|
||||
|
||||
#[post("/two-factor/get-u2f", data = "<data>")]
|
||||
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
if !CONFIG.domain_set() {
|
||||
err!("`DOMAIN` environment variable is not set. U2F disabled")
|
||||
}
|
||||
let data: PasswordData = data.into_inner().data;
|
||||
|
||||
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
|
||||
err!("Invalid password");
|
||||
}
|
||||
|
||||
let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn)?;
|
||||
let keys_json: Vec<Value> = keys.iter().map(U2FRegistration::to_json).collect();
|
||||
|
||||
Ok(Json(json!({
|
||||
"Enabled": enabled,
|
||||
"Keys": keys_json,
|
||||
"Object": "twoFactorU2f"
|
||||
})))
|
||||
}
|
||||
|
||||
#[post("/two-factor/get-u2f-challenge", data = "<data>")]
|
||||
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
let data: PasswordData = data.into_inner().data;
|
||||
|
||||
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
|
||||
err!("Invalid password");
|
||||
}
|
||||
|
||||
let _type = TwoFactorType::U2fRegisterChallenge;
|
||||
let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).challenge;
|
||||
|
||||
Ok(Json(json!({
|
||||
"UserId": headers.user.uuid,
|
||||
"AppId": APP_ID.to_string(),
|
||||
"Challenge": challenge,
|
||||
"Version": U2F_VERSION,
|
||||
})))
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
#[allow(non_snake_case)]
|
||||
struct EnableU2FData {
|
||||
Id: NumberOrString,
|
||||
// 1..5
|
||||
Name: String,
|
||||
MasterPasswordHash: String,
|
||||
DeviceResponse: String,
|
||||
}
|
||||
|
||||
// This struct is referenced from the U2F lib
|
||||
// because it doesn't implement Deserialize
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[serde(remote = "Registration")]
|
||||
struct RegistrationDef {
|
||||
key_handle: Vec<u8>,
|
||||
pub_key: Vec<u8>,
|
||||
attestation_cert: Option<Vec<u8>>,
|
||||
device_name: Option<String>,
|
||||
}
|
||||
-
-#[derive(Serialize, Deserialize)]
-pub struct U2FRegistration {
-    pub id: i32,
-    pub name: String,
-    #[serde(with = "RegistrationDef")]
-    pub reg: Registration,
-    pub counter: u32,
-    compromised: bool,
-    pub migrated: Option<bool>,
-}
-
-impl U2FRegistration {
-    fn to_json(&self) -> Value {
-        json!({
-            "Id": self.id,
-            "Name": self.name,
-            "Compromised": self.compromised,
-        })
-    }
-}
-
-// This struct is copied from the U2F lib
-// to add an optional error code
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct RegisterResponseCopy {
-    pub registration_data: String,
-    pub version: String,
-    pub client_data: String,
-
-    pub error_code: Option<NumberOrString>,
-}
-
-impl From<RegisterResponseCopy> for RegisterResponse {
-    fn from(r: RegisterResponseCopy) -> RegisterResponse {
-        RegisterResponse {
-            registration_data: r.registration_data,
-            version: r.version,
-            client_data: r.client_data,
-        }
-    }
-}
-
-#[post("/two-factor/u2f", data = "<data>")]
-fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
-    let data: EnableU2FData = data.into_inner().data;
-    let mut user = headers.user;
-
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
-
-    let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
-    let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
-        Some(c) => c,
-        None => err!("Can't recover challenge"),
-    };
-
-    let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
-    tf_challenge.delete(&conn)?;
-
-    let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;
-
-    let error_code = response.error_code.clone().map_or("0".into(), NumberOrString::into_string);
-
-    if error_code != "0" {
-        err!("Error registering U2F token")
-    }
-
-    let registration = U2F.register_response(challenge, response.into())?;
-    let full_registration = U2FRegistration {
-        id: data.Id.into_i32()?,
-        name: data.Name,
-        reg: registration,
-        compromised: false,
-        counter: 0,
-        migrated: None,
-    };
-
-    let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;
-
-    // TODO: Check that there is no repeat Id
-    regs.push(full_registration);
-    save_u2f_registrations(&user.uuid, &regs, &conn)?;
-
-    _generate_recover_code(&mut user, &conn);
-
-    let keys_json: Vec<Value> = regs.iter().map(U2FRegistration::to_json).collect();
-    Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
-    })))
-}
-
-#[put("/two-factor/u2f", data = "<data>")]
-fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_u2f(data, headers, conn)
-}
-
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
-struct DeleteU2FData {
-    Id: NumberOrString,
-    MasterPasswordHash: String,
-}
-
-#[delete("/two-factor/u2f", data = "<data>")]
-fn delete_u2f(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
-    let data: DeleteU2FData = data.into_inner().data;
-
-    let id = data.Id.into_i32()?;
-
-    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
-
-    let type_ = TwoFactorType::U2f as i32;
-    let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn) {
-        Some(tf) => tf,
-        None => err!("U2F data not found!"),
-    };
-
-    let mut data: Vec<U2FRegistration> = match serde_json::from_str(&tf.data) {
-        Ok(d) => d,
-        Err(_) => err!("Error parsing U2F data"),
-    };
-
-    data.retain(|r| r.id != id);
-
-    let new_data_str = serde_json::to_string(&data)?;
-
-    tf.data = new_data_str;
-    tf.save(&conn)?;
-
-    let keys_json: Vec<Value> = data.iter().map(U2FRegistration::to_json).collect();
-
-    Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
-    })))
-}
-
-fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
-    let challenge = U2F.generate_challenge().unwrap();
-
-    TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
-        .save(conn)
-        .expect("Error saving challenge");
-
-    challenge
-}
-
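Editor's note: `_create_u2f_challenge` persists the challenge, and the activate/validate handlers fetch and delete it, so each challenge is single-use. A stripped-down sketch of that lifecycle (an in-memory map stands in for the `TwoFactor` table):

use std::collections::HashMap;

struct ChallengeStore {
    pending: HashMap<String, String>, // user_uuid -> serialized challenge
}

impl ChallengeStore {
    fn issue(&mut self, user: &str, challenge: &str) {
        self.pending.insert(user.to_string(), challenge.to_string());
    }

    // Mirrors find_by_user_and_type + delete: a challenge can only be consumed
    // once, and a replayed response simply fails the lookup.
    fn consume(&mut self, user: &str) -> Option<String> {
        self.pending.remove(user)
    }
}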
-fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
-    TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn)
-}
-
-fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
-    let type_ = TwoFactorType::U2f as i32;
-    let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
-        Some(tf) => (tf.enabled, tf.data),
-        None => return Ok((false, Vec::new())), // If no data, return empty list
-    };
-
-    let data = match serde_json::from_str(&regs) {
-        Ok(d) => d,
-        Err(_) => {
-            // If error, try old format
-            let mut old_regs = _old_parse_registrations(&regs);
-
-            if old_regs.len() != 1 {
-                err!("The old U2F format only allows one device")
-            }
-
-            // Convert to new format
-            let new_regs = vec![U2FRegistration {
-                id: 1,
-                name: "Unnamed U2F key".into(),
-                reg: old_regs.remove(0),
-                compromised: false,
-                counter: 0,
-                migrated: None,
-            }];
-
-            // Save new format
-            save_u2f_registrations(user_uuid, &new_regs, conn)?;
-
-            new_regs
-        }
-    };
-
-    Ok((enabled, data))
-}
-
-fn _old_parse_registrations(registations: &str) -> Vec<Registration> {
-    #[derive(Deserialize)]
-    struct Helper(#[serde(with = "RegistrationDef")] Registration);
-
-    let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");
-
-    regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect()
-}
-
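Editor's note: `get_u2f_registrations` tries the current JSON schema first and only falls back to the legacy single-device format, upgrading it in place. A compact sketch of that try-new-then-migrate shape (types here are illustrative):

use serde::Deserialize;

#[derive(Deserialize)]
struct NewFormat {
    id: i32,
    name: String,
}

fn load_or_migrate(raw: &str) -> Result<Vec<NewFormat>, serde_json::Error> {
    match serde_json::from_str::<Vec<NewFormat>>(raw) {
        Ok(list) => Ok(list), // already in the new schema
        Err(_) => {
            // Legacy payload was a single bare string; wrap it in the new shape.
            let legacy: String = serde_json::from_str(raw)?;
            // A real implementation would also persist the converted value here,
            // as save_u2f_registrations does above.
            Ok(vec![NewFormat { id: 1, name: legacy }])
        }
    }
}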
-pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
-    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);
-
-    let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect();
-
-    if registrations.is_empty() {
-        err!("No U2F devices registered")
-    }
-
-    Ok(U2F.sign_request(challenge, registrations))
-}
-
-pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
-    let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
-    let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn);
-
-    let challenge = match tf_challenge {
-        Some(tf_challenge) => {
-            let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
-            tf_challenge.delete(conn)?;
-            challenge
-        }
-        None => err!("Can't recover login challenge"),
-    };
-    let response: SignResponse = serde_json::from_str(response)?;
-    let mut registrations = get_u2f_registrations(user_uuid, conn)?.1;
-    if registrations.is_empty() {
-        err!("No U2F devices registered")
-    }
-
-    for reg in &mut registrations {
-        let response = U2F.sign_response(challenge.clone(), reg.reg.clone(), response.clone(), reg.counter);
-        match response {
-            Ok(new_counter) => {
-                reg.counter = new_counter;
-                save_u2f_registrations(user_uuid, &registrations, conn)?;
-
-                return Ok(());
-            }
-            Err(u2f::u2ferror::U2fError::CounterTooLow) => {
-                reg.compromised = true;
-                save_u2f_registrations(user_uuid, &registrations, conn)?;
-
-                err!("This device might be compromised!");
-            }
-            Err(e) => {
-                warn!("E {:#}", e);
-                // break;
-            }
-        }
-    }
-    err!("error verifying response")
-}
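Editor's note: the `CounterTooLow` branch above is U2F's clone detection: an authenticator's signature counter must strictly increase, so a counter that goes backwards marks the credential as compromised. The core check, reduced to a sketch:

struct Device {
    counter: u32,
    compromised: bool,
}

fn check_counter(dev: &mut Device, reported: u32) -> Result<(), &'static str> {
    if reported <= dev.counter {
        // Replayed response or cloned key: flag it and refuse the login.
        dev.compromised = true;
        return Err("counter too low - device might be compromised");
    }
    dev.counter = reported; // persist the new high-water mark
    Ok(())
}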
@@ -1,15 +1,17 @@
+use rocket::serde::json::Json;
 use rocket::Route;
-use rocket_contrib::json::Json;
 use serde_json::Value;
+use url::Url;
 use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn};

 use crate::{
     api::{
-        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        core::{log_user_event, two_factor::_generate_recover_code},
+        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
     },
-    auth::Headers,
+    auth::{ClientIp, Headers},
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
     },
     error::Error,
@@ -20,21 +22,42 @@ pub fn routes() -> Vec<Route> {
     routes![get_webauthn, generate_webauthn_challenge, activate_webauthn, activate_webauthn_put, delete_webauthn,]
 }

+// Some old u2f structs still needed for migrating from u2f to WebAuthn
+// Both `struct Registration` and `struct U2FRegistration` can be removed if we remove the u2f to WebAuthn migration
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Registration {
+    pub key_handle: Vec<u8>,
+    pub pub_key: Vec<u8>,
+    pub attestation_cert: Option<Vec<u8>>,
+    pub device_name: Option<String>,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct U2FRegistration {
+    pub id: i32,
+    pub name: String,
+    #[serde(with = "Registration")]
+    pub reg: Registration,
+    pub counter: u32,
+    compromised: bool,
+    pub migrated: Option<bool>,
+}
+
 struct WebauthnConfig {
     url: String,
+    origin: Url,
     rpid: String,
 }

 impl WebauthnConfig {
     fn load() -> Webauthn<Self> {
         let domain = CONFIG.domain();
+        let domain_origin = CONFIG.domain_origin();
         Webauthn::new(Self {
-            rpid: reqwest::Url::parse(&domain)
-                .map(|u| u.domain().map(str::to_owned))
-                .ok()
-                .flatten()
-                .unwrap_or_default(),
+            rpid: Url::parse(&domain).map(|u| u.domain().map(str::to_owned)).ok().flatten().unwrap_or_default(),
             url: domain,
+            origin: Url::parse(&domain_origin).unwrap(),
         })
     }
 }
@@ -44,26 +67,19 @@ impl webauthn_rs::WebauthnConfig for WebauthnConfig {
         &self.url
     }

-    fn get_origin(&self) -> &str {
-        &self.url
+    fn get_origin(&self) -> &Url {
+        &self.origin
     }

     fn get_relying_party_id(&self) -> &str {
         &self.rpid
     }
-}
-
-impl webauthn_rs::WebauthnConfig for &WebauthnConfig {
-    fn get_relying_party_name(&self) -> &str {
-        &self.url
-    }
-
-    fn get_origin(&self) -> &str {
-        &self.url
-    }
-
-    fn get_relying_party_id(&self) -> &str {
-        &self.rpid
+
+    /// We have WebAuthn configured to discourage user verification
+    /// if we leave this enabled, it will cause verification issues when a keys send UV=1.
+    /// Upstream (the library they use) ignores this when set to discouraged, so we should too.
+    fn get_require_uv_consistency(&self) -> bool {
+        false
     }
 }
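Editor's note: `WebauthnConfig::load` above derives the relying-party ID by parsing the configured domain and keeping only its registrable host name. The same chain in isolation:

use url::Url;

fn derive_rpid(domain: &str) -> String {
    Url::parse(domain)
        .map(|u| u.domain().map(str::to_owned)) // None for bare IP addresses
        .ok()
        .flatten()
        .unwrap_or_default()
}

fn main() {
    assert_eq!(derive_rpid("https://vault.example.com:8443/path"), "vault.example.com");
    assert_eq!(derive_rpid("not a url"), ""); // falls back to the empty default
}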
@@ -87,7 +103,7 @@ impl WebauthnRegistration {
 }

 #[post("/two-factor/get-webauthn", data = "<data>")]
-fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }
@@ -96,7 +112,7 @@ fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
         err!("Invalid password");
     }

-    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn)?;
+    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?;
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
@@ -107,12 +123,13 @@ fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
 }

 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
         err!("Invalid password");
     }

-    let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)?
+    let registrations = get_webauthn_registrations(&headers.user.uuid, &mut conn)
+        .await?
         .1
         .into_iter()
         .map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering
@@ -128,7 +145,7 @@ fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers,
     )?;

     let type_ = TwoFactorType::WebauthnRegisterChallenge;
-    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn)?;
+    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;

     let mut challenge_value = serde_json::to_value(challenge.public_key)?;
     challenge_value["status"] = "ok".into();
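Editor's note: `challenge_value["status"] = "ok".into()` works because `serde_json::Value` supports index assignment on objects, inserting the key if it is absent. For example:

fn main() {
    let mut v = serde_json::json!({ "challenge": "abc123" });
    v["status"] = "ok".into(); // inserts a new key into the object
    v["errorMessage"] = serde_json::Value::Null;
    assert_eq!(v["status"], "ok");
    println!("{v}");
}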
@@ -225,7 +242,12 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
 }

 #[post("/two-factor/webauthn", data = "<data>")]
-fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_webauthn(
+    data: JsonUpcase<EnableWebauthnData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
     let data: EnableWebauthnData = data.into_inner().data;
     let mut user = headers.user;

@@ -235,10 +257,10 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con

     // Retrieve and delete the saved challenge state
     let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
-    let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
+    let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         Some(tf) => {
             let state: RegistrationState = serde_json::from_str(&tf.data)?;
-            tf.delete(&conn)?;
+            tf.delete(&mut conn).await?;
             state
         }
         None => err!("Can't recover challenge"),
@@ -248,7 +270,7 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
     let (credential, _data) =
         WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;

-    let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn)?.1;
+    let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
     // TODO: Check for repeated ID's
     registrations.push(WebauthnRegistration {
         id: data.Id.into_i32()?,
@@ -259,8 +281,12 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
     });

     // Save the registrations and return them
-    TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?).save(&conn)?;
-    _generate_recover_code(&mut user, &conn);
+    TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
+        .save(&mut conn)
+        .await?;
+    _generate_recover_code(&mut user, &mut conn).await;
+
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     Ok(Json(json!({
@@ -271,8 +297,13 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
 }

 #[put("/two-factor/webauthn", data = "<data>")]
-fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_webauthn(data, headers, conn)
+async fn activate_webauthn_put(
+    data: JsonUpcase<EnableWebauthnData>,
+    headers: Headers,
+    conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
+    activate_webauthn(data, headers, conn, ip).await
 }

 #[derive(Deserialize, Debug)]
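Editor's note: the hunks above convert the handlers to `async fn` and thread a `ClientIp` request guard through so the new event log can record the caller's address. A minimal, self-contained sketch of that Rocket 0.5 pattern (the guard here is illustrative, not vaultwarden's actual type):

use rocket::request::{FromRequest, Outcome, Request};
use rocket::{post, routes};

struct ClientIp(Option<std::net::IpAddr>);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientIp {
    type Error = ();

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        // Rocket already knows the peer address; just wrap it.
        Outcome::Success(ClientIp(req.client_ip()))
    }
}

#[post("/two-factor/demo")]
async fn demo(ip: ClientIp) -> String {
    // An async handler is free to .await database calls, as the diff now does.
    format!("request from {:?}", ip.0)
}

#[rocket::main]
async fn main() -> Result<(), rocket::Error> {
    let _ = rocket::build().mount("/", routes![demo]).launch().await?;
    Ok(())
}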
@@ -283,33 +314,34 @@ struct DeleteU2FData {
 }

 #[delete("/two-factor/webauthn", data = "<data>")]
-fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let id = data.data.Id.into_i32()?;
     if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
         err!("Invalid password");
     }

-    let type_ = TwoFactorType::Webauthn as i32;
-    let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn) {
-        Some(tf) => tf,
-        None => err!("Webauthn data not found!"),
-    };
+    let mut tf =
+        match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await {
+            Some(tf) => tf,
+            None => err!("Webauthn data not found!"),
+        };

     let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;

-    let item_pos = match data.iter().position(|r| r.id != id) {
+    let item_pos = match data.iter().position(|r| r.id == id) {
         Some(p) => p,
         None => err!("Webauthn entry not found"),
     };

     let removed_item = data.remove(item_pos);
     tf.data = serde_json::to_string(&data)?;
-    tf.save(&conn)?;
+    tf.save(&mut conn).await?;
     drop(tf);

     // If entry is migrated from u2f, delete the u2f entry as well
-    if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn) {
-        use crate::api::core::two_factor::u2f::U2FRegistration;
+    if let Some(mut u2f) =
+        TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &mut conn).await
+    {
         let mut data: Vec<U2FRegistration> = match serde_json::from_str(&u2f.data) {
             Ok(d) => d,
             Err(_) => err!("Error parsing U2F data"),
@@ -319,7 +351,7 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo
         let new_data_str = serde_json::to_string(&data)?;

         u2f.data = new_data_str;
-        u2f.save(&conn)?;
+        u2f.save(&mut conn).await?;
     }

     let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
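Editor's note: the change from `position(|r| r.id != id)` to `position(|r| r.id == id)` fixes a real bug: the old predicate found the first entry whose id did not match, so the wrong credential could be removed. The corrected find-and-remove, in miniature:

fn main() {
    let mut ids = vec![1, 3, 5];
    let target = 3;
    if let Some(pos) = ids.iter().position(|&x| x == target) {
        let removed = ids.remove(pos);
        assert_eq!(removed, 3); // with `!=` this would have removed 1
    }
}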
@@ -331,18 +363,21 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo
     })))
 }

-pub fn get_webauthn_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
+pub async fn get_webauthn_registrations(
+    user_uuid: &str,
+    conn: &mut DbConn,
+) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
     let type_ = TwoFactorType::Webauthn as i32;
-    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
+    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
         Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
         None => Ok((false, Vec::new())), // If no data, return empty list
     }
 }

-pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
+pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult {
     // Load saved credentials
     let creds: Vec<Credential> =
-        get_webauthn_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.credential).collect();
+        get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect();

     if creds.is_empty() {
         err!("No Webauthn devices registered")
@@ -354,27 +389,33 @@ pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {

     // Save the challenge state for later validation
     TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
-        .save(conn)?;
+        .save(conn)
+        .await?;

     // Return challenge to the clients
     Ok(Json(serde_json::to_value(response.public_key)?))
 }

-pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
+pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
     let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
-    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
+    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
         Some(tf) => {
             let state: AuthenticationState = serde_json::from_str(&tf.data)?;
-            tf.delete(conn)?;
+            tf.delete(conn).await?;
             state
         }
-        None => err!("Can't recover login challenge"),
+        None => err!(
+            "Can't recover login challenge",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        ),
     };

     let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
     let rsp: PublicKeyCredential = rsp.data.into();

-    let mut registrations = get_webauthn_registrations(user_uuid, conn)?.1;
+    let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;

     // If the credential we received is migrated from U2F, enable the U2F compatibility
     //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
@@ -385,10 +426,16 @@ pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -
             reg.credential.counter = auth_data.counter;

             TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
-                .save(conn)?;
+                .save(conn)
+                .await?;
             return Ok(());
         }
     }

-    err!("Credential not present")
+    err!(
+        "Credential not present",
+        ErrorEvent {
+            event: EventType::UserFailedLogIn2fa
+        }
+    )
 }
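Editor's note: the `crate::util::UpCase<...>` wrapper above exists because clients may send PascalCase field names. A sketch of one way to accept both spellings with plain serde aliases (an analogy, not vaultwarden's actual `UpCase` implementation):

use serde::Deserialize;

#[derive(Deserialize)]
struct Payload {
    #[serde(alias = "Response")] // accept "Response" as well as "response"
    response: String,
}

fn main() {
    let a: Payload = serde_json::from_str(r#"{"Response":"ok"}"#).unwrap();
    let b: Payload = serde_json::from_str(r#"{"response":"ok"}"#).unwrap();
    assert_eq!(a.response, b.response);
}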
Some files were not shown because too many files have changed in this diff.