Compare commits
604 Commits
.env.template
@@ -1,13 +1,14 @@
+# shellcheck disable=SC2034,SC2148
 ## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
 ##
 ## Be aware that most of these settings will be overridden if they were changed
 ## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
 ##
-## By default, vaultwarden expects for this file to be named ".env" and located
+## By default, Vaultwarden expects for this file to be named ".env" and located
 ## in the current working directory. If this is not the case, the environment
 ## variable ENV_FILE can be set to the location of this file prior to starting
-## vaultwarden.
+## Vaultwarden.

 ## Main data folder
 # DATA_FOLDER=data
@@ -71,6 +72,13 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012

+## Enables push notifications (requires key and id from https://bitwarden.com/host)
+# PUSH_ENABLED=true
+# PUSH_INSTALLATION_ID=CHANGEME
+# PUSH_INSTALLATION_KEY=CHANGEME
+## Don't change this unless you know what you're doing.
+# PUSH_RELAY_BASE_URI=https://push.bitwarden.com
+
 ## Controls whether users are allowed to create Bitwarden Sends.
 ## This setting applies globally to all users.
 ## To control this on a per-org basis instead, use the "Disable Send" org policy.
@@ -80,11 +88,34 @@
 ## This setting applies globally to all users.
 # EMERGENCY_ACCESS_ALLOWED=true

+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=
+
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false
+
 ## Job scheduler settings
 ##
 ## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
 ## and are always in terms of UTC time (regardless of your local time zone settings).
 ##
+## The schedule format is a bit different from crontab as crontab does not contains seconds.
+## You can test the the format here: https://crontab.guru, but remove the first digit!
+## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK
+## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
+## "0 30 * * * * "
+## "0 30 1 * * * "
+##
 ## How often (in ms) the job scheduler thread checks for jobs that need running.
 ## Set to 0 to globally disable scheduled jobs.
 # JOB_POLL_INTERVAL_MS=30000
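The six-field format added above is standard crontab plus a leading seconds field. As a worked illustration (the variable name is a placeholder, not a setting shipped in the file), a job meant to fire at 30 seconds past minute 5 of every hour would be written:

# EXAMPLE_JOB_SCHEDULE="30 5 * * * *"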
@@ -102,12 +133,16 @@
 # INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
 ##
 ## Cron schedule of the job that sends expiration reminders to emergency access grantors.
-## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
-# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 5 * * * *"
+## Defaults to hourly (3 minutes after the hour). Set blank to disable this job.
+# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 3 * * * *"
 ##
 ## Cron schedule of the job that grants emergency access requests that have met the required wait time.
-## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
-# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 5 * * * *"
+## Defaults to hourly (7 minutes after the hour). Set blank to disable this job.
+# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 7 * * * *"
+##
+## Cron schedule of the job that cleans old events from the event table.
+## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
+# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"

 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
@@ -133,7 +168,7 @@
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents vaultwarden from automatically enabling it on start.
+## this setting only prevents Vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
@@ -231,9 +266,15 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com

-## Token for the admin interface, preferably use a long random string
-## One option is to use 'openssl rand -base64 48'
+## Token for the admin interface, preferably an Argon2 PCH string
+## Vaultwarden has a built-in generator by calling `vaultwarden hash`
+## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
 ## If not set, the admin panel is disabled
+## New Argon2 PHC string
+## Note that for some environments, like docker-compose you need to escape all the dollar signs `$` with an extra dollar sign like `$$`
+## Also, use single quotes (') instead of double quotes (") to enclose the string when needed
+# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'
+## Old plain text string (Will generate warnings in favor of Argon2)
 # ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp

 ## Enable this to bypass the admin panel security. This option is only
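A minimal sketch of adopting the new token format, assuming the vaultwarden binary from this release is on PATH (the PHC string in the diff above is only an example value):

vaultwarden hash                      # prompts for the admin password, prints an Argon2 PHC string
# ADMIN_TOKEN='$argon2id$v=19$...'    # paste it single-quoted so the '$' separators survive
# In docker-compose, additionally double every '$' (e.g. '$$argon2id$$v=19$$...').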
@@ -245,6 +286,10 @@
 ## Name shown in the invitation emails that don't come from a specific organization
 # INVITATION_ORG_NAME=Vaultwarden

+## The number of hours after which an organization invite token, emergency access invite token,
+## email verification token and deletion request token will expire (must be at least 1)
+# INVITATION_EXPIRATION_HOURS=120
+
 ## Per-organization attachment storage limit (KB)
 ## Max kilobytes of attachment storage allowed per organization.
 ## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
@@ -266,9 +311,9 @@
 ## This setting applies globally to all users.
 # INCOMPLETE_2FA_TIME_LIMIT=3

-## Controls the PBBKDF password iterations to apply on the server
-## The change only applies when the password is changed
-# PASSWORD_ITERATIONS=100000
+## Number of server-side passwords hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=350000

 ## Controls whether users can set password hints. This setting applies globally to all users.
 # PASSWORD_HINTS_ALLOWED=true
@@ -298,11 +343,14 @@
 ## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
 # LOGIN_RATELIMIT_MAX_BURST=10

-## Number of seconds, on average, between admin requests from the same IP address before rate limiting kicks in.
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
 # ADMIN_RATELIMIT_SECONDS=300
 ## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
 # ADMIN_RATELIMIT_MAX_BURST=3

+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
+
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
@@ -341,18 +389,23 @@
 # ROCKET_WORKERS=10
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}

-## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
+## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
 # SMTP_FROM=vaultwarden@domain.tld
 # SMTP_FROM_NAME=Vaultwarden
 # SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
-# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
+# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15

+# Whether to send mail via the `sendmail` command
+# USE_SENDMAIL=false
+# Which sendmail command to use. The one found in the $PATH is used if not specified.
+# SENDMAIL_COMMAND="/path/to/sendmail"
+
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
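Under the revised comment, a minimal mail setup needs SMTP_FROM plus exactly one transport; for instance (hostnames are placeholders):

SMTP_FROM=vaultwarden@example.com
SMTP_HOST=smtp.example.com
# ...or route through the local MTA instead of SMTP_HOST:
# USE_SENDMAIL=true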
@@ -363,6 +416,9 @@
 ## but might need to be changed in case it trips some anti-spam filters
 # HELO_NAME=

+## Embed images as email attachments
+# SMTP_EMBED_IMAGES=false
+
 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
.github/FUNDING.yml (vendored, 1 change)
@@ -1,2 +1,3 @@
 github: dani-garcia
 liberapay: dani-garcia
+custom: ["https://paypal.me/DaniGG"]
.github/workflows/build.yml (vendored, 95 changes)
@@ -9,6 +9,8 @@ on:
       - "Cargo.*"
       - "build.rs"
       - "rust-toolchain"
+      - "rustfmt.toml"
+      - "diesel.toml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -17,58 +19,77 @@ on:
       - "Cargo.*"
       - "build.rs"
       - "rust-toolchain"
+      - "rustfmt.toml"
+      - "diesel.toml"

 jobs:
   build:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
+      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
     strategy:
       fail-fast: false
       matrix:
         channel:
           - "rust-toolchain" # The version defined in rust-toolchain
-          - "1.59.0" # The supported MSRV
+          - "msrv" # The supported MSRV

     name: Build and Test ${{ matrix.channel }}

     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
       # End Checkout the repo


       # Install dependencies
       - name: "Install dependencies Ubuntu"
         run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies


-      # Uses the rust-toolchain file to determine version
+      # Determine rust-toolchain version
+      - name: Init Variables
+        id: toolchain
+        shell: bash
+        run: |
+          if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
+            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+          elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
+            RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
+          else
+            RUST_TOOLCHAIN="${{ matrix.channel }}"
+          fi
+          echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
+      # End Determine rust-toolchain version
+
+
+      # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
+        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
-          profile: minimal
+          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
           components: clippy, rustfmt
       # End Uses the rust-toolchain file to determine version


-      # Install the MSRV channel to be used
+      # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
+        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
-          profile: minimal
-          override: true
-          toolchain: ${{ matrix.channel }}
+          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used


       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0
+      - uses: Swatinem/rust-cache@2656b87321093db1cb55fbd73183d195214fdfd1 # v2.5.0
       # End Enable Rust Caching

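The grep expression in the new Init Variables step extracts the MSRV from Cargo.toml. Run against the Cargo.toml updated in this same compare, it would print roughly (a sketch):

grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml
# -> 1.68.2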
@@ -84,65 +105,51 @@ jobs:
       # First test all features together, afterwards test them separately.
       - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
         id: test_sqlite_mysql_postgresql_mimalloc
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: $${{ always() }}
-        with:
-          command: test
-          args: --release --features sqlite,mysql,postgresql,enable_mimalloc
+        run: |
+          cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc

       - name: "test features: sqlite,mysql,postgresql"
         id: test_sqlite_mysql_postgresql
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: $${{ always() }}
-        with:
-          command: test
-          args: --release --features sqlite,mysql,postgresql
+        run: |
+          cargo test --release --features sqlite,mysql,postgresql

       - name: "test features: sqlite"
         id: test_sqlite
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: $${{ always() }}
-        with:
-          command: test
-          args: --release --features sqlite
+        run: |
+          cargo test --release --features sqlite

       - name: "test features: mysql"
         id: test_mysql
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: $${{ always() }}
-        with:
-          command: test
-          args: --release --features mysql
+        run: |
+          cargo test --release --features mysql

       - name: "test features: postgresql"
         id: test_postgresql
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: $${{ always() }}
-        with:
-          command: test
-          args: --release --features postgresql
+        run: |
          cargo test --release --features postgresql
       # End Run cargo tests


       # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
       - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
         id: clippy
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: ${{ always() && matrix.channel == 'rust-toolchain' }}
-        with:
-          command: clippy
-          args: --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+        run: |
+          cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
       # End Run cargo clippy


       # Run cargo fmt (Only run on rust-toolchain defined version)
       - name: "check formatting"
         id: formatting
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: ${{ always() && matrix.channel == 'rust-toolchain' }}
-        with:
-          command: fmt
-          args: --all -- --check
+        run: |
+          cargo fmt --all -- --check
       # End Run cargo fmt
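Because the steps are now plain cargo invocations instead of actions-rs wrappers, the same checks can be reproduced locally with the exact commands from the workflow:

cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
cargo fmt --all -- --check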
@@ -179,19 +186,17 @@ jobs:

       # Build the binary to upload to the artifacts
       - name: "build features: sqlite,mysql,postgresql"
-        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          command: build
-          args: --release --features sqlite,mysql,postgresql
+        run: |
+          cargo build --release --features sqlite,mysql,postgresql
       # End Build the binary


       # Upload artifact to Github Actions
       - name: "Upload artifact"
-        uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0
+        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           name: vaultwarden
-          path: target/${{ matrix.target-triple }}/release/vaultwarden
+          path: target/release/vaultwarden
       # End Upload artifact to Github Actions
.github/workflows/hadolint.yml (vendored, 17 changes)
@@ -1,22 +1,19 @@
 name: Hadolint

-on:
-  push:
-    paths:
-      - "docker/**"
-
-  pull_request:
-    paths:
-      - "docker/**"
+on: [
+  push,
+  pull_request
+]

 jobs:
   hadolint:
     name: Validate Dockerfile syntax
     runs-on: ubuntu-20.04
+    timeout-minutes: 30
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
       # End Checkout the repo
@@ -27,7 +24,7 @@ jobs:
           sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
           sudo chmod +x /usr/local/bin/hadolint
         env:
-          HADOLINT_VERSION: 2.10.0
+          HADOLINT_VERSION: 2.12.0
       # End Download hadolint

       # Test Dockerfiles
.github/workflows/release.yml (vendored, 168 changes)
@@ -24,21 +24,22 @@ jobs:
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
      - name: Skip Duplicates Actions
        id: skip_check
-        uses: fkirc/skip-duplicate-actions@9d116fa7e55f295019cfab7e3ab72b478bcf7fdd # v4.0.0
+        uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
        with:
          cancel_others: 'true'
        # Only run this when not creating a tag
        if: ${{ startsWith(github.ref, 'refs/heads/') }}

   docker-build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
+    timeout-minutes: 120
     needs: skip_check
     # Start a local docker registry to be used to generate multi-arch images.
     services:
@@ -47,11 +48,23 @@ jobs:
       ports:
         - 5000:5000
     env:
-      DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speedup building!
-      # DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
-      DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
+      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
+      # build performance and the ability to copy extended file attributes
+      # (e.g., for executable capabilities) across build phases.
+      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
+      # The *_REPO variables need to be configured as repository variables
+      # Append `/settings/variables/actions` to your repo url
+      # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
+      # Check for Docker hub credentials in secrets
+      HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
+      # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
+      # Check for Github credentials in secrets
+      HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
+      # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
+      # Check for Quay.io credentials in secrets
+      HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
     if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
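Each build/push step further down combines one of these HAVE_* flags with the matrix value, so a missing secret simply skips that registry rather than failing the job. Stripped of the workflow plumbing, every such step reduces to roughly this sketch (hooks/build internals are not shown in this compare; values are placeholders):

DOCKER_REPO="index.docker.io/<user>/vaultwarden" DOCKER_TAG="testing" ./hooks/build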
@@ -60,17 +73,10 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
         with:
           fetch-depth: 0

-      # Login to Docker Hub
-      - name: Login to Docker Hub
-        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v2.0.0
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
       # Determine Docker Tag
       - name: Init Variables
         id: vars
@@ -78,42 +84,152 @@ jobs:
         run: |
           # Check which main tag we are going to build determined by github.ref
           if [[ "${{ github.ref }}" == refs/tags/* ]]; then
-            echo "::set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
+            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
           elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
-            echo "::set-output name=DOCKER_TAG::testing"
+            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
           fi
       # End Determine Docker Tag

-      - name: Build Debian based images
+      # Login to Docker Hub
+      - name: Login to Docker Hub
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+
+      # Login to GitHub Container Registry
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Login to Quay.io
+      - name: Login to Quay.io
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_USERNAME }}
+          password: ${{ secrets.QUAY_TOKEN }}
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+
+      # Debian
+
+      # Docker Hub
+      - name: Build Debian based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
         run: |
           ./hooks/build
-        if: ${{ matrix.base_image == 'debian' }}
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}

-      - name: Push Debian based images
+      - name: Push Debian based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
         run: |
           ./hooks/push
-        if: ${{ matrix.base_image == 'debian' }}
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}

-      - name: Build Alpine based images
+      # GitHub Container Registry
+      - name: Build Debian based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Push Debian based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Quay.io
+      - name: Build Debian based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Push Debian based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      # Alpine
+
+      # Docker Hub
+      - name: Build Alpine based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
         run: |
           ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' }}
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}

-      - name: Push Alpine based images
+      - name: Push Alpine based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
         run: |
           ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' }}
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+
+      # GitHub Container Registry
+      - name: Build Alpine based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Push Alpine based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Quay.io
+      - name: Build Alpine based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Push Alpine based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
.hadolint.yaml
@@ -3,5 +3,9 @@ ignored:
   - DL3008
+  # disable explicit version for apk install
+  - DL3018
   # disable check for consecutive `RUN` instructions
   - DL3059
 trustedRegistries:
   - docker.io
+  - ghcr.io
+  - quay.io
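With this config in place, a local check equivalent to the workflow might look like the following sketch (hadolint picks up .hadolint.yaml from the working directory; the Dockerfile glob is an assumption about the repo layout):

hadolint docker/*/Dockerfile*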
.pre-commit-config.yaml
@@ -1,16 +1,20 @@
 ---
 repos:
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.3.0
+    rev: v4.4.0
     hooks:
     -   id: check-yaml
     -   id: check-json
     -   id: check-toml
+    -   id: mixed-line-ending
+        args: ["--fix=no"]
     -   id: end-of-file-fixer
         exclude: "(.*js$|.*css$)"
     -   id: check-case-conflict
     -   id: check-merge-conflict
     -   id: detect-private-key
+    -   id: check-symlinks
+    -   id: forbid-submodules
 -   repo: local
     hooks:
     -   id: fmt
@@ -27,7 +31,7 @@ repos:
         language: system
         args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
         types_or: [rust, file]
-        files: (Cargo.toml|Cargo.lock|.*\.rs$)
+        files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
         pass_filenames: false
     -   id: cargo-clippy
         name: cargo clippy
@@ -36,5 +40,5 @@ repos:
         language: system
         args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
         types_or: [rust, file]
-        files: (Cargo.toml|Cargo.lock|.*\.rs$)
+        files: (Cargo.toml|Cargo.lock|rust-toolchain|clippy.toml|.*\.rs$)
         pass_filenames: false
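To pick up the updated hooks locally, the usual pre-commit invocations apply (standard tooling, not specific to this repo):

pre-commit install
pre-commit run --all-files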
Cargo.lock (generated, 2115 changes)
Cargo.toml (137 changes)
@@ -3,12 +3,12 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.59"
+rust-version = "1.68.2"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
-license = "GPL-3.0-only"
+license = "AGPL-3.0-only"
 publish = false
 build = "build.rs"
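To verify a local toolchain against the raised MSRV, something like the following works (rustup syntax; the sqlite feature is an arbitrary choice, since at least one database backend must be enabled):

rustup toolchain install 1.68.2
cargo +1.68.2 check --features sqlite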
@@ -24,6 +24,11 @@ vendored_openssl = ["openssl/vendored"]
 # Enable MiMalloc memory allocator to replace the default malloc
 # This can improve performance for Alpine builds
 enable_mimalloc = ["mimalloc"]
+# This is a development dependency, and should only be used during development!
+# It enables the usage of the diesel_logger crate, which is able to output the generated queries.
+# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
+# if you want to turn off the logging for a specific run.
+query_logger = ["diesel_logger"]

 # Enable unstable features, requires nightly
 # Currently only used to enable rusts official ip support
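A development-only sketch of using the new feature, per the comments above (feature set and binary path assume a local sqlite debug build):

cargo build --features sqlite,query_logger
QUERY_LOGGER=1 ./target/debug/vaultwarden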
@@ -31,69 +36,72 @@ unstable = []

 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.0.1" # Needs to be v4 until fern is updated
+syslog = "6.1.0"

 [dependencies]
 # Logging
-log = "0.4.17"
-fern = { version = "0.6.1", features = ["syslog-6"] }
-tracing = { version = "0.1.35", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
-
-backtrace = "0.3.66" # Logging panics to logfile instead stderr only
+log = "0.4.19"
+fern = { version = "0.6.2", features = ["syslog-6"] }
+tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

 # A `dotenv` implementation for Rust
-dotenvy = { version = "0.15.1", default-features = false }
+dotenvy = { version = "0.15.7", default-features = false }

 # Lazy initialization
-once_cell = "1.13.0"
+once_cell = "1.18.0"

 # Numerical libraries
 num-traits = "0.2.15"
-num-derive = "0.3.3"
+num-derive = "0.4.0"

 # Web framework
-rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }
+rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
+# rocket_ws = { version ="0.1.0-rc.3" }
+rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch

 # WebSockets libraries
-tokio-tungstenite = "0.17.2"
+tokio-tungstenite = "0.19.0"
 rmpv = "1.0.0" # MessagePack library
-dashmap = "5.3.4" # Concurrent hashmap implementation
+
+# Concurrent HashMap used for WebSocket messaging and favicons
+dashmap = "5.4.0"

 # Async futures
-futures = "0.3.21"
-tokio = { version = "1.20.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time"] }
+futures = "0.3.28"
+tokio = { version = "1.29.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.139", features = ["derive"] }
-serde_json = "1.0.82"
+serde = { version = "1.0.166", features = ["derive"] }
+serde_json = "1.0.99"

 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.8", features = ["chrono", "r2d2"] }
-diesel_migrations = "1.4.0"
+diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
+diesel_migrations = "2.1.0"
+diesel_logger = { version = "0.3.0", optional = true }

-# Bundled SQLite
-libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }
+# Bundled/Static SQLite
+libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
 ring = "0.16.20"

 # UUID generation
-uuid = { version = "1.1.2", features = ["v4"] }
+uuid = { version = "1.4.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.19", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.6.1"
-time = "0.3.11"
+chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.8.3"
+time = "0.3.22"

 # Job scheduler
-job_scheduler_ng = "2.0.1"
+job_scheduler_ng = "2.0.4"

 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.3.2"
+data-encoding = "2.4.0"

 # JWT library
-jsonwebtoken = "8.1.1"
+jsonwebtoken = "8.3.0"

 # TOTP library
 totp-lite = "2.0.0"
@@ -104,49 +112,72 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 # WebAuthn libraries
 webauthn-rs = "0.3.2"

-# Handling of URL's for WebAuthn
-url = "2.2.2"
+# Handling of URL's for WebAuthn and favicons
+url = "2.4.0"

 # Email libraries
-lettre = { version = "0.10.0", features = ["smtp-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
-percent-encoding = "2.1.0" # URL encoding library used for URL's in the emails
+lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
+email_address = "0.2.4"

-# Template library
-handlebars = { version = "4.3.2", features = ["dir_source"] }
+# HTML Template library
+handlebars = { version = "4.3.7", features = ["dir_source"] }

-# HTTP client
-reqwest = { version = "0.11.11", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
+# HTTP client (Used for favicons, version check, DUO and HIBP API)
+reqwest = { version = "0.11.18", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }

-# For favicon extraction from main website
-html5gum = "0.5.2"
-regex = { version = "1.6.0", features = ["std", "perf", "unicode-perl"], default-features = false }
-data-url = "0.1.1"
-bytes = "1.1.0"
-cached = "0.36.0"
+# Favicon extraction libraries
+html5gum = "0.5.3"
+regex = { version = "1.8.4", features = ["std", "perf", "unicode-perl"], default-features = false }
+data-url = "0.3.0"
+bytes = "1.4.0"
+
+# Cache function results (Used for version check and favicon fetching)
+cached = "0.44.0"

 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.16.0"
-cookie_store = "0.16.1"
+cookie = "0.16.2"
+cookie_store = "0.19.1"

-# Used by U2F, JWT and Postgres
-openssl = "0.10.41"
+# Used by U2F, JWT and PostgreSQL
+openssl = "0.10.55"

 # CLI argument parsing
 pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.7"
-governor = "0.4.2"
+paste = "1.0.13"
+governor = "0.5.1"

-# Capture CTRL+C
-ctrlc = { version = "3.2.2", features = ["termination"] }
+# Check client versions for specific features.
+semver = "1.0.17"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.29", features = ["secure"], default-features = false, optional = true }
+mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
+which = "4.4.0"
+
+# Argon2 library with support for the PHC format
+argon2 = "0.5.0"
+
+# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
+rpassword = "7.2.0"

+[patch.crates-io]
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
+# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch

 # Strip debuginfo from the release builds
 # Also enable thin LTO for some optimizations
 [profile.release]
 strip = "debuginfo"
 lto = "thin"
+
+# Always build argon2 using opt-level 3
+# This is a huge speed improvement during testing
+[profile.dev.package.argon2]
+opt-level = 3

 # A little bit of a speedup
 [profile.dev]
 split-debuginfo = "unpacked"
LICENSE.txt (143 changes)
@@ -1,5 +1,5 @@
-GNU GENERAL PUBLIC LICENSE
-Version 3, 29 June 2007
+GNU AFFERO GENERAL PUBLIC LICENSE
+Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
@@ -7,17 +7,15 @@

 Preamble

-The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
+The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.

 The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
+our General Public Licenses are intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
+software for all its users.

 When we speak of free software, we are referring to freedom, not
 price. Our General Public Licenses are designed to make sure that you
@@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.

-To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
+Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.

-For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
+A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.

-Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
+The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.

 For the developers' and authors' protection, the GPL clearly explains
 that there is no warranty for this free software. For both users' and
 authors' sake, the GPL requires that modified versions be marked as
 changed, so that their problems will not be attributed erroneously to
 authors of previous versions.

-Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
+An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.

 The precise terms and conditions for copying, distribution and
 modification follow.
@@ -72,7 +60,7 @@ modification follow.

 0. Definitions.

-"This License" refers to version 3 of the GNU General Public License.
+"This License" refers to version 3 of the GNU Affero General Public License.

 "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.
@@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.

-  13. Use with the GNU Affero General Public License.
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.

   Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
+under version 3 of the GNU General Public License into a single
 combined work, and to convey the resulting work. The terms of this
 License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.

   14. Revised Versions of this License.

   The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.

 Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
+Program specifies that a certain numbered version of the GNU Affero General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
+GNU Affero General Public License, you may choose any version ever published
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
@@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found.
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
|
README.md (35 changes)
@@ -3,16 +3,18 @@

 📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.

 ---

 [](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml)
+[](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
 [](https://hub.docker.com/r/vaultwarden/server)
+[](https://quay.io/repository/vaultwarden/server)
 [](https://deps.rs/repo/github/dani-garcia/vaultwarden)
 [](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
+[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
 [](https://matrix.to/#/#vaultwarden:matrix.org)

 Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).

-**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
+**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**

 #### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

@@ -23,23 +25,24 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani

 Basically full implementation of the Bitwarden API is provided, including:

 * Organizations support
-* Attachments
+* Attachments and Send
 * Vault API support
 * Serving the static files for Vault interface
 * Website icons API
 * Authenticator and U2F support
 * YubiKey and Duo support
+* Emergency Access

 ## Installation
 Pull the docker image and mount a volume from the host for persistent storage:

 ```sh
 docker pull vaultwarden/server:latest
-docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
+docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
 ```
 This will preserve any persistent data under /vw-data/; you can adapt the path to whatever suits you.
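Because every piece of state lives in the mounted `/vw-data/` volume, the container itself is disposable; upgrading amounts to replacing it. A minimal sketch, using the same names and flags as the example above:

```sh
# Fetch the new image, then recreate the container; /vw-data/ is untouched.
docker pull vaultwarden/server:latest
docker stop vaultwarden && docker rm vaultwarden
docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
```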
-**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.

 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
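As one hedged illustration of the reverse-proxy route (the wiki lists several), Caddy can sit in front of the container and manage the certificate itself. This sketch assumes `vault.example.com` is your own domain and that vaultwarden was published on `127.0.0.1:8080` instead of port 80, so that Caddy owns ports 80/443:

```sh
# Caddy obtains and renews a Let's Encrypt certificate automatically
# and forwards plain HTTP to the vaultwarden container.
docker run -d --name caddy --network host \
  -v caddy_data:/data \
  docker.io/library/caddy:2 \
  caddy reverse-proxy --from vault.example.com --to localhost:8080
```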
@@ -49,41 +52,43 @@ If you have an available domain name, you can get HTTPS certificates with [Let's

 See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

 ## Get in touch
-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/).

-If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though!

 If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!

 ### Sponsors
 Thanks for your contribution to the project!

 <!--
 <table>
 <tr>
 <td align="center">
-<a href="https://github.com/netdadaltd">
-<img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
+<a href="https://github.com/username">
+<img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/>
 <br />
-<sub><b>netDada Ltd.</b></sub>
+<sub><b>username</b></sub>
 </a>
 </td>
 </tr>
 </table>

 <br/>
 -->

 <table>
 <tr>
 <td align="center">
-<a href="https://github.com/Gyarbij" style="width: 75px">
-<sub><b>Chono N</b></sub>
+<a href="https://github.com/themightychris" style="width: 75px">
+<sub><b>Chris Alfano</b></sub>
 </a>
 </td>
 </tr>
 <tr>
 <td align="center">
-<a href="https://github.com/themightychris">
-<sub><b>Chris Alfano</b></sub>
+<a href="https://github.com/numberly" style="width: 75px">
+<sub><b>Numberly</b></sub>
 </a>
 </td>
 </tr>
build.rs (23 changes)
@@ -9,20 +9,25 @@ fn main() {
     println!("cargo:rustc-cfg=mysql");
     #[cfg(feature = "postgresql")]
     println!("cargo:rustc-cfg=postgresql");
+    #[cfg(feature = "query_logger")]
+    println!("cargo:rustc-cfg=query_logger");

     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!(
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );

+    #[cfg(all(not(debug_assertions), feature = "query_logger"))]
+    compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");

     // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
     // If neither exist, read from git.
     let maybe_vaultwarden_version =
         env::var("VW_VERSION").or_else(|_| env::var("BWRS_VERSION")).or_else(|_| version_from_git_info());

     if let Ok(version) = maybe_vaultwarden_version {
-        println!("cargo:rustc-env=VW_VERSION={}", version);
-        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
+        println!("cargo:rustc-env=VW_VERSION={version}");
+        println!("cargo:rustc-env=CARGO_PKG_VERSION={version}");
     }
 }
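The fallback chain above means a build without a git checkout (for example from a release tarball) can still stamp a version string; a hypothetical invocation, with `sqlite` standing in for whichever of the three DB backends you need:

```sh
# Pin the reported version explicitly instead of reading git metadata.
VW_VERSION=1.29.0-custom cargo build --release --features sqlite
```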
@@ -47,29 +52,29 @@ fn version_from_git_info() -> Result<String, std::io::Error> {
     // the current commit doesn't have an associated tag
     let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
     if let Some(ref exact) = exact_tag {
-        println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
+        println!("cargo:rustc-env=GIT_EXACT_TAG={exact}");
     }

     // The last available tag, equal to exact_tag when
     // the current commit is tagged
     let last_tag = run(&["git", "describe", "--abbrev=0", "--tags"])?;
-    println!("cargo:rustc-env=GIT_LAST_TAG={}", last_tag);
+    println!("cargo:rustc-env=GIT_LAST_TAG={last_tag}");

     // The current branch name
     let branch = run(&["git", "rev-parse", "--abbrev-ref", "HEAD"])?;
-    println!("cargo:rustc-env=GIT_BRANCH={}", branch);
+    println!("cargo:rustc-env=GIT_BRANCH={branch}");

     // The current git commit hash
     let rev = run(&["git", "rev-parse", "HEAD"])?;
     let rev_short = rev.get(..8).unwrap_or_default();
-    println!("cargo:rustc-env=GIT_REV={}", rev_short);
+    println!("cargo:rustc-env=GIT_REV={rev_short}");

     // Combined version
     if let Some(exact) = exact_tag {
         Ok(exact)
-    } else if &branch != "main" && &branch != "master" {
-        Ok(format!("{}-{} ({})", last_tag, rev_short, branch))
+    } else if &branch != "main" && &branch != "master" && &branch != "HEAD" {
+        Ok(format!("{last_tag}-{rev_short} ({branch})"))
     } else {
-        Ok(format!("{}-{}", last_tag, rev_short))
+        Ok(format!("{last_tag}-{rev_short}"))
     }
 }
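For reference, the same probing can be reproduced from a shell; a sketch that mirrors the calls and the branch rule above:

```sh
# Mirrors version_from_git_info(): exact tag if HEAD is tagged, otherwise
# "<last tag>-<short rev>", plus the branch name when off main/master.
exact=$(git describe --abbrev=0 --tags --exact-match 2>/dev/null)
last=$(git describe --abbrev=0 --tags)
branch=$(git rev-parse --abbrev-ref HEAD)
rev=$(git rev-parse HEAD | cut -c1-8)
if [ -n "$exact" ]; then
  echo "$exact"
elif [ "$branch" != "main" ] && [ "$branch" != "master" ] && [ "$branch" != "HEAD" ]; then
  echo "$last-$rev ($branch)"
else
  echo "$last-$rev"
fi
```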
docker/Dockerfile.j2
@@ -2,40 +2,42 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

-{% set build_stage_base_image = "rust:1.61-bullseye" %}
+{% set rust_version = "1.70.0" %}
+{% set debian_version = "bullseye" %}
+{% set alpine_version = "3.17" %}
+{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
 {% if "alpine" in target_file %}
 {% if "amd64" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.61.0" %}
-{% set runtime_stage_base_image = "alpine:3.15" %}
+{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s" % rust_version %}
+{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
 {% set package_arch_target = "x86_64-unknown-linux-musl" %}
 {% elif "armv7" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.61.0" %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.15" %}
+{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s" % rust_version %}
+{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
 {% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
 {% elif "armv6" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.61.0" %}
-{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.15" %}
+{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s" % rust_version %}
+{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
 {% set package_arch_target = "arm-unknown-linux-musleabi" %}
 {% elif "arm64" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.61.0" %}
-{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.15" %}
+{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s" % rust_version %}
+{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
 {% set package_arch_target = "aarch64-unknown-linux-musl" %}
 {% endif %}
 {% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "debian:bullseye-slim" %}
+{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
 {% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
+{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
 {% set package_arch_name = "arm64" %}
 {% set package_arch_target = "aarch64-unknown-linux-gnu" %}
 {% set package_cross_compiler = "aarch64-linux-gnu" %}
 {% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
+{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
 {% set package_arch_name = "armel" %}
 {% set package_arch_target = "arm-unknown-linux-gnueabi" %}
 {% set package_cross_compiler = "arm-linux-gnueabi" %}
 {% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
+{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
 {% set package_arch_name = "armhf" %}
 {% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
 {% set package_cross_compiler = "arm-linux-gnueabihf" %}

@@ -50,7 +52,7 @@
 {% else %}
 {% set package_arch_target_param = "" %}
 {% endif %}
-{% if "buildx" in target_file %}
+{% if "buildkit" in target_file %}
 {% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
 {% else %}
 {% set mount_rust_cache = "" %}
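`mount_rust_cache` expands to BuildKit `--mount=type=cache` flags on the `RUN` lines, so the cargo registry and git checkouts persist between builds instead of being re-downloaded into every layer. A sketch of building one of the buildkit variants (the output tag and file path are assumptions):

```sh
# --mount=type=cache requires BuildKit; the classic builder would reject
# those RUN lines, which is why they only appear in the *.buildkit files.
DOCKER_BUILDKIT=1 docker build \
  -t vaultwarden:local \
  -f docker/amd64/Dockerfile.buildkit .
```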
@@ -59,8 +61,8 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "v2022.6.2" %}
-{% set vault_image_digest = "sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70" %}
+{% set vault_version = "v2023.5.0" %}
+{% set vault_image_digest = "sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -70,21 +72,19 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:{{ vault_version }}
-#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:{{ vault_version }}
-#     [vaultwarden/web-vault@{{ vault_image_digest }}]
+#     $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version }}
+#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version }}
+#     [docker.io/vaultwarden/web-vault@{{ vault_image_digest }}]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
-#     [vaultwarden/web-vault:{{ vault_version }}]
+#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
+#     [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
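When bumping the pinned web-vault release, the new digest can be scripted with the same `docker image inspect` call the comments describe; a small sketch that prints just the value to paste into `vault_image_digest`:

```sh
# RepoDigests is a list; index 0 is the docker.io entry after a fresh pull.
docker pull docker.io/vaultwarden/web-vault:v2023.5.0
docker image inspect --format '{{index .RepoDigests 0}}' \
  docker.io/vaultwarden/web-vault:v2023.5.0
```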
 ########################## BUILD IMAGE ##########################
 FROM {{ build_stage_base_image }} as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
     LANG=C.UTF-8 \
@@ -93,7 +93,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

@@ -104,21 +103,19 @@ RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
 ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
 {% endif %}
 {% elif "arm" in target_file %}
-#
-# Install required build libs for {{ package_arch_name }} architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the {{ package_arch_name }} architecture
 RUN dpkg --add-architecture {{ package_arch_name }} \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev{{ package_arch_prefix }} \
         gcc-{{ package_cross_compiler }} \
         libc6-dev{{ package_arch_prefix }} \
         libpq5{{ package_arch_prefix }} \
         libpq-dev{{ package_arch_prefix }} \
         libmariadb3{{ package_arch_prefix }} \
         libmariadb-dev{{ package_arch_prefix }} \
         libmariadb-dev-compat{{ package_arch_prefix }} \
         gcc-{{ package_cross_compiler }} \
         libmariadb3{{ package_arch_prefix }} \
         libpq-dev{{ package_arch_prefix }} \
         libpq5{{ package_arch_prefix }} \
         libssl-dev{{ package_arch_prefix }} \
     #
     # Make sure cargo has the right target config
     && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
@@ -130,16 +127,13 @@ ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_
     CROSS_COMPILE="1" \
     OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
     OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"

 {% elif "amd64" in target_file %}
-# Install DB packages
+# Install build dependencies
 RUN apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libmariadb-dev{{ package_arch_prefix }} \
-        libpq-dev{{ package_arch_prefix }} \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+        libmariadb-dev \
+        libpq-dev
 {% endif %}

 # Creates a dummy project used to grab dependencies
@@ -178,7 +172,6 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}

 ######################## RUNTIME IMAGE ########################
@@ -195,7 +188,6 @@ ENV ROCKET_PROFILE="release" \

 {% if "amd64" not in target_file %}
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 {% endif %}

@@ -203,20 +195,18 @@ RUN [ "cross-build-start" ]
 RUN mkdir /data \
 {% if "alpine" in runtime_stage_base_image %}
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
         dumb-init \
-        ca-certificates
+        openssl \
+        tzdata
 {% else %}
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         dumb-init \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 {% endif %}
@@ -224,13 +214,11 @@ RUN mkdir /data \
 {% if "armv6" in target_file and "alpine" not in target_file %}
 # In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
 # This symlink was there in the buster images, and for some reason this is needed.
-# hadolint ignore=DL3059
 RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

 {% endif -%}

 {% if "amd64" not in target_file %}
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 {% endif %}

@@ -253,10 +241,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
docker/Makefile
@@ -8,8 +8,8 @@ all: $(OBJECTS)
 %/Dockerfile.alpine: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.buildx: Dockerfile.j2 render_template
+%/Dockerfile.buildkit: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.buildx.alpine: Dockerfile.j2 render_template
+%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
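Each pattern rule re-renders its Dockerfile from the template, passing the output path in as `target_file` so the Jinja2 conditionals above can select the right base images; regenerating a single file by hand is the same invocation the rule performs:

```sh
# Equivalent to the %/Dockerfile.buildkit rule; plain `make` regenerates all.
./render_template "Dockerfile.j2" '{"target_file":"amd64/Dockerfile.buildkit"}' > amd64/Dockerfile.buildkit
```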
docker/amd64/Dockerfile
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2022.6.2
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
-# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
+# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
-# [vaultwarden/web-vault:v2022.6.2]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+# [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.61-bullseye as build
+FROM docker.io/library/rust:1.70.0-bullseye as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,19 +36,16 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

-# Install DB packages
+# Install build dependencies
 RUN apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libmariadb-dev \
-        libpq-dev \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+        libpq-dev

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -81,13 +75,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM debian:bullseye-slim
+FROM docker.io/library/debian:bullseye-slim

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -98,12 +91,11 @@ ENV ROCKET_PROFILE="release" \
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         dumb-init \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

@@ -123,10 +115,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
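Because the image declares a `HEALTHCHECK`, the result of `/healthcheck.sh` is visible from the host; a quick way to read it for the container from the README example:

```sh
# Reports "starting", "healthy" or "unhealthy" based on the HEALTHCHECK.
docker inspect --format '{{.State.Health.Status}}' vaultwarden
```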
docker/amd64/Dockerfile.alpine
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2022.6.2
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
-# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
+# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
-# [vaultwarden/web-vault:v2022.6.2]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+# [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:x86_64-musl-stable-1.61.0 as build
+FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.15
+FROM docker.io/library/alpine:3.17

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -93,11 +88,10 @@ ENV ROCKET_PROFILE="release" \
 # Create data folder and install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
         dumb-init \
-        ca-certificates
+        openssl \
+        tzdata

 VOLUME /data
@@ -115,10 +109,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
docker/amd64/Dockerfile.buildkit
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2022.6.2
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
-# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
+# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
-# [vaultwarden/web-vault:v2022.6.2]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+# [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.61-bullseye as build
+FROM docker.io/library/rust:1.70.0-bullseye as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,19 +36,16 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

-# Install DB packages
+# Install build dependencies
 RUN apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libmariadb-dev \
-        libpq-dev \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+        libpq-dev

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -81,13 +75,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM debian:bullseye-slim
+FROM docker.io/library/debian:bullseye-slim

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -98,12 +91,11 @@ ENV ROCKET_PROFILE="release" \
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         dumb-init \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

@@ -123,10 +115,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
docker/amd64/Dockerfile.buildkit.alpine
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2022.6.2
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
-# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
+# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
-# [vaultwarden/web-vault:v2022.6.2]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+# [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:x86_64-musl-stable-1.61.0 as build
+FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.15
+FROM docker.io/library/alpine:3.17

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -93,11 +88,10 @@ ENV ROCKET_PROFILE="release" \
 # Create data folder and install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
         dumb-init \
-        ca-certificates
+        openssl \
+        tzdata

 VOLUME /data
@@ -115,10 +109,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
docker/arm64/Dockerfile
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2022.6.2
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
-# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
+# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
-# [vaultwarden/web-vault:v2022.6.2]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+# [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.61-bullseye as build
+FROM docker.io/library/rust:1.70.0-bullseye as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

-#
-# Install required build libs for arm64 architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the arm64 architecture
 RUN dpkg --add-architecture arm64 \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:arm64 \
         gcc-aarch64-linux-gnu \
         libc6-dev:arm64 \
         libpq5:arm64 \
         libpq-dev:arm64 \
         libmariadb3:arm64 \
         libmariadb-dev:arm64 \
         libmariadb-dev-compat:arm64 \
         gcc-aarch64-linux-gnu \
         libmariadb3:arm64 \
         libpq-dev:arm64 \
         libpq5:arm64 \
         libssl-dev:arm64 \
     #
     # Make sure cargo has the right target config
     && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
     OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,35 +94,31 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/aarch64-debian:bullseye
+FROM docker.io/balenalib/aarch64-debian:bullseye

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80

-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

 # Create data folder and install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         dumb-init \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data
@@ -147,10 +136,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
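After a cross build like this one, it is worth confirming that the binary really targets AArch64 before shipping the image. A hedged sketch: the stage name `build` and the target path come from the Dockerfile above, while the tag and Dockerfile location are assumptions:

```sh
# Build only the `build` stage, copy the binary out, and inspect it.
docker build --target build -t vw-build-arm64 -f docker/arm64/Dockerfile .
id=$(docker create vw-build-arm64)
docker cp "$id":/app/target/aarch64-unknown-linux-gnu/release/vaultwarden ./vaultwarden-arm64
docker rm "$id"
file ./vaultwarden-arm64   # expect: ELF 64-bit LSB executable, ARM aarch64
```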
docker/arm64/Dockerfile.alpine
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2022.6.2
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
-# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
+# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
-# [vaultwarden/web-vault:v2022.6.2]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+# [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:aarch64-musl-stable-1.61.0 as build
+FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/aarch64-alpine:3.15
+FROM docker.io/balenalib/aarch64-alpine:3.17

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -89,19 +84,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs

-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

 # Create data folder and install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
         dumb-init \
-        ca-certificates
+        openssl \
+        tzdata

-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data
@@ -119,10 +111,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-# We should be able to remove the dumb-init now with Rocket 0.5
-# But the balenalib images have some issues with their entry.sh
-# See: https://github.com/balena-io-library/base-images/issues/735
-# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -2,7 +2,6 @@
|
||||
|
||||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||
|
||||
# Using multistage build:
|
||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||
@@ -16,20 +15,18 @@
|
||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||
# click the tag name to view the digest of the image it currently points to.
|
||||
# - From the command line:
|
||||
# $ docker pull vaultwarden/web-vault:v2022.6.2
|
||||
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
|
||||
# [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
|
||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
||||
#
|
||||
# - Conversely, to get the tag name from the digest:
|
||||
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
|
||||
# [vaultwarden/web-vault:v2022.6.2]
|
||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
||||
#
|
||||
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
|
||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for arm64 architecture.
# hadolint ignore=DL3059
# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libpq5:arm64 \
        libpq-dev:arm64 \
        libmariadb3:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        gcc-aarch64-linux-gnu \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,35 +94,31 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
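Editor's note: the `--mount=type=cache` flags above are BuildKit features; a classic `docker build` rejects them. A minimal invocation sketch, assuming the renamed `docker/arm64/Dockerfile.buildkit` path introduced by the hooks/build change further down (the image tag is illustrative):

    # Enable BuildKit explicitly (unnecessary on daemons where it is already the default)
    DOCKER_BUILDKIT=1 docker build \
        -t vaultwarden:aarch64-local \
        -f docker/arm64/Dockerfile.buildkit \
        .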

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:bullseye
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -147,10 +136,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.61.0 as build
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
@@ -89,19 +84,16 @@ ENV ROCKET_PROFILE="release" \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        ca-certificates \
        curl \
        dumb-init \
        ca-certificates
        openssl \
        tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -119,10 +111,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev:armel \
        libmariadb3:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,40 +94,35 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -152,10 +140,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.61.0 as build
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal
@@ -77,13 +73,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
@@ -91,19 +86,16 @@ ENV ROCKET_PROFILE="release" \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        ca-certificates \
        curl \
        dumb-init \
        ca-certificates
        openssl \
        tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -121,10 +113,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev:armel \
        libmariadb3:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,40 +94,35 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -152,10 +140,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.61.0 as build
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal
@@ -77,13 +73,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
@@ -91,19 +86,16 @@ ENV ROCKET_PROFILE="release" \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        ca-certificates \
        curl \
        dumb-init \
        ca-certificates
        openssl \
        tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -121,10 +113,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armhf architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev:armhf \
        libmariadb3:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,35 +94,31 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -147,10 +136,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.61.0 as build
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.15
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
@@ -89,19 +84,16 @@ ENV ROCKET_PROFILE="release" \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        ca-certificates \
        curl \
        dumb-init \
        ca-certificates
        openssl \
        tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -119,10 +111,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armhf architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev:armhf \
        libmariadb3:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,35 +94,31 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -147,10 +136,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.6.2
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.6.2
#     [vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70]
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70
#     [vaultwarden/web-vault:v2022.6.2]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:1dfda41cbddeac5bc59540261fff8defcac37170b5ba02d29c12fa1215498f70 as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.61.0 as build
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.15
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
@@ -89,19 +84,16 @@ ENV ROCKET_PROFILE="release" \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        ca-certificates \
        curl \
        dumb-init \
        ca-certificates
        openssl \
        tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -119,10 +111,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,8 +2,8 @@

# Use the value of the corresponding env var (if present),
# or a default value otherwise.
: ${DATA_FOLDER:="data"}
: ${ROCKET_PORT:="80"}
: "${DATA_FOLDER:="data"}"
: "${ROCKET_PORT:="80"}"

CONFIG_FILE="${DATA_FOLDER}"/config.json

@@ -45,9 +45,13 @@ if [ -r "${CONFIG_FILE}" ]; then
    fi
fi

addr="${ROCKET_ADDRESS}"
if [ -z "${addr}" ] || [ "${addr}" = '0.0.0.0' ] || [ "${addr}" = '::' ]; then
    addr='localhost'
fi
base_path="$(get_base_path "${DOMAIN}")"
if [ -n "${ROCKET_TLS}" ]; then
    s='s'
fi
curl --insecure --fail --silent --show-error \
    "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1
    "http${s}://${addr}:${ROCKET_PORT}${base_path}/alive" || exit 1

@@ -9,15 +9,15 @@ fi

if [ -d /etc/vaultwarden.d ]; then
    for f in /etc/vaultwarden.d/*.sh; do
        if [ -r $f ]; then
            . $f
        if [ -r "${f}" ]; then
            . "${f}"
        fi
    done
elif [ -d /etc/bitwarden_rs.d ]; then
    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
        if [ -r "${f}" ]; then
            . "${f}"
        fi
    done
fi
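Editor's note: the quoting added here matters as soon as a drop-in file name contains whitespace; an unquoted `$f` is word-split before the test runs. A small sketch reproducing the difference (the file name is hypothetical):

    mkdir -p /tmp/vw-demo && printf 'echo sourced\n' > '/tmp/vw-demo/10 custom.sh'
    for f in /tmp/vw-demo/*.sh; do
        # Unquoted: [ -r $f ] expands to [ -r /tmp/vw-demo/10 custom.sh ] and errors out.
        # Quoted: the test sees a single argument and the file is sourced correctly.
        if [ -r "${f}" ]; then . "${f}"; fi
    done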

@@ -1,3 +1,5 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
@@ -5,7 +7,9 @@ arches=(
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix
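Editor's note: the exported suffix is what selects which generated Dockerfile a tag builds from. A usage sketch (tag value illustrative):

    DOCKER_TAG=1.29.0-alpine
    source ./hooks/arches.sh
    echo "${arches[@]}"      # amd64 armv6 armv7 arm64
    echo "${distro_suffix}"  # .alpine  (empty for non-alpine tags)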

hooks/build (13 changed lines)
@@ -1,7 +1,8 @@
#!/bin/bash
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
@@ -23,10 +24,10 @@ LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="GPL-3.0-only"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
@@ -34,9 +35,9 @@ for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done
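Editor's note: after this loop each OCI annotation is paired with a `--label` flag. A sketch of what the array expansion feeds to `docker build` (values illustrative):

    # "${LABEL_ARGS[@]}" expands to flag/value pairs such as:
    #   --label org.opencontainers.image.licenses=AGPL-3.0-only
    #   --label org.opencontainers.image.url=https://github.com/dani-garcia/vaultwarden
    printf '%s\n' "${LABEL_ARGS[@]}"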

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildx as template
# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildx_suffix=.buildx
    buildkit_suffix=.buildkit
fi

set -ex
@@ -45,6 +46,6 @@ for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/Dockerfile${buildx_suffix}${distro_suffix} \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

set -ex

hooks/push (54 changed lines)
@@ -1,5 +1,6 @@
#!/bin/bash
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled
@@ -41,7 +42,7 @@ LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in ${arches[@]}; do
for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
@@ -71,9 +72,9 @@ tags=("${DOCKER_REPO}:${DOCKER_TAG}")
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=(${DOCKER_REPO}:alpine)
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=(${DOCKER_REPO}:latest)
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

@@ -91,10 +92,10 @@ declare -A arch_to_platform=(
    [arm64]="linux/arm64"
)
platforms=()
for arch in ${arches[@]}; do
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platforms="$(join "," "${platforms[@]}")"
platform="$(join "," "${platforms[@]}")"
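Editor's note: the rename makes the scalar singular to match buildx's `--platform` flag, which takes one comma-joined string. Assuming the usual mapping for the remaining arches (only `[arm64]` is visible in this hunk), the default list evaluates to:

    echo "${platform}"
    # -> linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64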

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
@@ -104,46 +105,7 @@ docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platforms}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx

# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing vaultwarden tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
#
# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
# so this step can be removed once fixed versions are in wider distribution.
#
# Tags:
#
#   testing => testing-arm32v6
#   testing-alpine => <ignored>
#   x.y.z => x.y.z-arm32v6, latest-arm32v6
#   x.y.z-alpine => <ignored>
#
if [[ "${DOCKER_TAG}" != *alpine ]]; then
    image="${DOCKER_REPO}":"${DOCKER_TAG}"

    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"

    # Pull the armv6 image by digest, retag it, and repush it.
    docker pull "${DOCKER_REPO}"@"${digest}"
    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
    docker push "${image}"-arm32v6

    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
        docker push "${DOCKER_REPO}:latest"-arm32v6
    fi
fi
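Editor's note: the `jq` filter above is the crux of the workaround. `docker manifest inspect` emits the manifest list as JSON, and the filter picks out the arm/v6 entry's digest, which is then retagged. Run standalone (image tag and digest illustrative):

    docker manifest inspect vaultwarden/server:testing | jq -r \
        '.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    # -> sha256:0123abc...  (the digest that gets repushed as testing-arm32v6)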

@@ -0,0 +1,3 @@
DROP TABLE `groups`;
DROP TABLE groups_users;
DROP TABLE collections_groups;

migrations/mysql/2022-07-27-110000_add_group_support/up.sql (new file, 23 lines)
@@ -0,0 +1,23 @@
CREATE TABLE `groups` (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
    name VARCHAR(100) NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id VARCHAR(300) NULL,
    creation_date DATETIME NOT NULL,
    revision_date DATETIME NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid),
    users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
    UNIQUE (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
    groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    UNIQUE (collections_uuid, groups_uuid)
);
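Editor's note: the backticks around `groups` are needed because GROUPS is a reserved word in recent MySQL. A quick way to sanity-check the new tables after the migration runs; a hedged sketch assuming the `mysql` client and a database named `vaultwarden` (table and column names are taken from the migration above):

    mysql vaultwarden <<'SQL'
    -- list each group with its member count
    SELECT g.name, COUNT(gu.users_organizations_uuid) AS members
    FROM `groups` g
    LEFT JOIN groups_users gu ON gu.groups_uuid = g.uuid
    GROUP BY g.uuid, g.name;
    SQL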

migrations/mysql/2022-10-18-170602_add_events/down.sql (new file, 1 line)
@@ -0,0 +1 @@
DROP TABLE event;

migrations/mysql/2022-10-18-170602_add_events/up.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid CHAR(36),
    org_uuid CHAR(36),
    cipher_uuid CHAR(36),
    collection_uuid CHAR(36),
    group_uuid CHAR(36),
    org_user_uuid CHAR(36),
    act_user_uuid CHAR(36),
    device_type INTEGER,
    ip_address TEXT,
    event_date DATETIME NOT NULL,
    policy_uuid CHAR(36),
    provider_uuid CHAR(36),
    provider_user_uuid CHAR(36),
    provider_org_uuid CHAR(36),
    UNIQUE (uuid)
);
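Editor's note: once events land in this table, simple aggregates make useful audit views. A hedged sketch under the same assumptions as the previous example:

    mysql vaultwarden <<'SQL'
    -- most recent 20 events with their type and acting user
    SELECT event_date, event_type, act_user_uuid
    FROM event
    ORDER BY event_date DESC
    LIMIT 20;
    SQL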
|
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE users_organizations
|
||||
ADD COLUMN reset_password_key TEXT;
|
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE users
|
||||
ADD COLUMN avatar_color VARCHAR(7);
|
7
migrations/mysql/2023-01-31-222222_add_argon2/up.sql
Normal file
@@ -0,0 +1,7 @@
|
||||
ALTER TABLE users
|
||||
ADD COLUMN
|
||||
client_kdf_memory INTEGER DEFAULT NULL;
|
||||
|
||||
ALTER TABLE users
|
||||
ADD COLUMN
|
||||
client_kdf_parallelism INTEGER DEFAULT NULL;
|
@@ -0,0 +1 @@
|
||||
ALTER TABLE devices ADD COLUMN push_uuid TEXT;
|
@@ -0,0 +1,10 @@
|
||||
CREATE TABLE organization_api_key (
|
||||
uuid CHAR(36) NOT NULL,
|
||||
org_uuid CHAR(36) NOT NULL REFERENCES organizations(uuid),
|
||||
atype INTEGER NOT NULL,
|
||||
api_key VARCHAR(255) NOT NULL,
|
||||
revision_date DATETIME NOT NULL,
|
||||
PRIMARY KEY(uuid, org_uuid)
|
||||
);
|
||||
|
||||
ALTER TABLE users ADD COLUMN external_id TEXT;
|
@@ -0,0 +1 @@
|
||||
ALTER TABLE collections ADD COLUMN external_id TEXT;
|
@@ -0,0 +1,3 @@
DROP TABLE groups;
DROP TABLE groups_users;
DROP TABLE collections_groups;
@@ -0,0 +1,23 @@
CREATE TABLE groups (
  uuid               CHAR(36) NOT NULL PRIMARY KEY,
  organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
  name               VARCHAR(100) NOT NULL,
  access_all         BOOLEAN NOT NULL,
  external_id        VARCHAR(300) NULL,
  creation_date      TIMESTAMP NOT NULL,
  revision_date      TIMESTAMP NOT NULL
);

CREATE TABLE groups_users (
  groups_uuid              CHAR(36) NOT NULL REFERENCES groups (uuid),
  users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
  PRIMARY KEY (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
  collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
  groups_uuid      CHAR(36) NOT NULL REFERENCES groups (uuid),
  read_only        BOOLEAN NOT NULL,
  hide_passwords   BOOLEAN NOT NULL,
  PRIMARY KEY (collections_uuid, groups_uuid)
);
@@ -0,0 +1 @@
DROP TABLE event;
19  migrations/postgresql/2022-10-18-170602_add_events/up.sql  Normal file
@@ -0,0 +1,19 @@
CREATE TABLE event (
  uuid               CHAR(36) NOT NULL PRIMARY KEY,
  event_type         INTEGER NOT NULL,
  user_uuid          CHAR(36),
  org_uuid           CHAR(36),
  cipher_uuid        CHAR(36),
  collection_uuid    CHAR(36),
  group_uuid         CHAR(36),
  org_user_uuid      CHAR(36),
  act_user_uuid      CHAR(36),
  device_type        INTEGER,
  ip_address         TEXT,
  event_date         TIMESTAMP NOT NULL,
  policy_uuid        CHAR(36),
  provider_uuid      CHAR(36),
  provider_user_uuid CHAR(36),
  provider_org_uuid  CHAR(36),
  UNIQUE (uuid)
);
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN avatar_color TEXT;
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_memory INTEGER DEFAULT NULL;

ALTER TABLE users
ADD COLUMN
client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1 @@
ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,10 @@
CREATE TABLE organization_api_key (
  uuid          CHAR(36) NOT NULL,
  org_uuid      CHAR(36) NOT NULL REFERENCES organizations(uuid),
  atype         INTEGER NOT NULL,
  api_key       VARCHAR(255),
  revision_date TIMESTAMP NOT NULL,
  PRIMARY KEY(uuid, org_uuid)
);

ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1 @@
ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -0,0 +1,3 @@
DROP TABLE groups;
DROP TABLE groups_users;
DROP TABLE collections_groups;
23  migrations/sqlite/2022-07-27-110000_add_group_support/up.sql  Normal file
@@ -0,0 +1,23 @@
CREATE TABLE groups (
  uuid               TEXT NOT NULL PRIMARY KEY,
  organizations_uuid TEXT NOT NULL REFERENCES organizations (uuid),
  name               TEXT NOT NULL,
  access_all         BOOLEAN NOT NULL,
  external_id        TEXT NULL,
  creation_date      TIMESTAMP NOT NULL,
  revision_date      TIMESTAMP NOT NULL
);

CREATE TABLE groups_users (
  groups_uuid              TEXT NOT NULL REFERENCES groups (uuid),
  users_organizations_uuid TEXT NOT NULL REFERENCES users_organizations (uuid),
  UNIQUE (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
  collections_uuid TEXT NOT NULL REFERENCES collections (uuid),
  groups_uuid      TEXT NOT NULL REFERENCES groups (uuid),
  read_only        BOOLEAN NOT NULL,
  hide_passwords   BOOLEAN NOT NULL,
  UNIQUE (collections_uuid, groups_uuid)
);
1  migrations/sqlite/2022-10-18-170602_add_events/down.sql  Normal file
@@ -0,0 +1 @@
DROP TABLE event;
19  migrations/sqlite/2022-10-18-170602_add_events/up.sql  Normal file
@@ -0,0 +1,19 @@
CREATE TABLE event (
  uuid               TEXT NOT NULL PRIMARY KEY,
  event_type         INTEGER NOT NULL,
  user_uuid          TEXT,
  org_uuid           TEXT,
  cipher_uuid        TEXT,
  collection_uuid    TEXT,
  group_uuid         TEXT,
  org_user_uuid      TEXT,
  act_user_uuid      TEXT,
  device_type        INTEGER,
  ip_address         TEXT,
  event_date         DATETIME NOT NULL,
  policy_uuid        TEXT,
  provider_uuid      TEXT,
  provider_user_uuid TEXT,
  provider_org_uuid  TEXT,
  UNIQUE (uuid)
);
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN avatar_color TEXT;
7  migrations/sqlite/2023-01-31-222222_add_argon2/up.sql  Normal file
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_memory INTEGER DEFAULT NULL;

ALTER TABLE users
ADD COLUMN
client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1 @@
ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,11 @@
CREATE TABLE organization_api_key (
  uuid          TEXT NOT NULL,
  org_uuid      TEXT NOT NULL,
  atype         INTEGER NOT NULL,
  api_key       TEXT NOT NULL,
  revision_date DATETIME NOT NULL,
  PRIMARY KEY(uuid, org_uuid),
  FOREIGN KEY(org_uuid) REFERENCES organizations(uuid)
);

ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1 @@
ALTER TABLE collections ADD COLUMN external_id TEXT;
93  resources/404.svg  Normal file
@@ -0,0 +1,93 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="500"
   height="222"
   viewBox="0 0 500 222"
   version="1.1"
   id="svg5"
   xml:space="preserve"
   inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)"
   sodipodi:docname="404.svg"
   inkscape:export-filename="404.png"
   inkscape:export-xdpi="96"
   inkscape:export-ydpi="96"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns:xlink="http://www.w3.org/1999/xlink"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
     id="namedview7"
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1.0"
     inkscape:showpageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     inkscape:deskcolor="#d1d1d1"
     inkscape:document-units="px"
     showgrid="false"
     inkscape:zoom="1.3791767"
     inkscape:cx="284.59007"
     inkscape:cy="214.25826"
     inkscape:window-width="1916"
     inkscape:window-height="1038"
     inkscape:window-x="0"
     inkscape:window-y="18"
     inkscape:window-maximized="1"
     inkscape:current-layer="layer1"
     showguides="false" /><defs
     id="defs2"><mask
       id="holes"><rect
         x="-60"
         y="-60"
         width="120"
         height="120"
         fill="#ffffff"
         id="rect3296" /><circle
         id="hole"
         cy="-40"
         r="3"
         cx="0" /><use
         transform="rotate(72)"
         xlink:href="#hole"
         id="use3299" /><use
         transform="rotate(144)"
         xlink:href="#hole"
         id="use3301" /><use
         transform="rotate(-144)"
         xlink:href="#hole"
         id="use3303" /><use
         transform="rotate(-72)"
         xlink:href="#hole"
         id="use3305" /></mask></defs><g
     inkscape:label="Ebene 1"
     inkscape:groupmode="layer"
     id="layer1"><rect
       style="fill:none;fill-opacity:0.5;stroke:none;stroke-width:0.74;stroke-opacity:1"
       id="rect681"
       width="666"
       height="222"
       x="0"
       y="0" /><text
       xml:space="preserve"
       style="font-size:128px;line-height:1.25;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7;stroke-width:1"
       x="249.9375"
       y="134.8125"
       id="text3425"><tspan
         id="tspan3423"
         x="249.9375"
         y="134.8125"
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:128px;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7;stroke-width:1"
         sodipodi:role="line">404</tspan></text><text
       xml:space="preserve"
       style="font-size:26.6667px;line-height:1.25;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle"
       x="249.04297"
       y="194.68582"
       id="text4067"><tspan
         sodipodi:role="line"
         id="tspan4065"
         x="249.04295"
         y="194.68582"
         style="font-size:26.6667px;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7">Return to the web vault?</tspan></text></g></svg>
After Width: | Height: | Size: 3.3 KiB |
Before Width: | Height: | Size: 8.7 KiB After Width: | Height: | Size: 5.5 KiB |
Before Width: | Height: | Size: 8.4 KiB After Width: | Height: | Size: 5.2 KiB |
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 6.5 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 6.5 KiB |
@@ -1 +1 @@
1.61.0
1.70.0
@@ -1,7 +1,4 @@
# version = "Two"
edition = "2021"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
# struct_lit_single_line = false
# overflow_delimited_expr = true
538  src/api/admin.rs
@@ -6,14 +6,14 @@ use std::env;
use rocket::serde::json::Json;
use rocket::{
    form::Form,
    http::{Cookie, CookieJar, SameSite, Status},
    request::{self, FlashMessage, FromRequest, Outcome, Request},
    response::{content::RawHtml as Html, Flash, Redirect},
    Route,
    http::{Cookie, CookieJar, MediaType, SameSite, Status},
    request::{FromRequest, Outcome, Request},
    response::{content::RawHtml as Html, Redirect},
    Catcher, Route,
};

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
    api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
@@ -25,19 +25,18 @@ use crate::{
    CONFIG, VERSION,
};

use futures::{stream, stream::StreamExt};

pub fn routes() -> Vec<Route> {
    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
        return routes![admin_disabled];
    }

    routes![
        admin_login,
        get_users_json,
        get_user_json,
        get_user_by_mail_json,
        post_admin_login,
        admin_page,
        admin_page_login,
        invite_user,
        logout,
        delete_user,
@@ -55,10 +54,19 @@ pub fn routes() -> Vec<Route> {
        organizations_overview,
        delete_organization,
        diagnostics,
        get_diagnostics_config
        get_diagnostics_config,
        resend_user_invite,
    ]
}

pub fn catchers() -> Vec<Catcher> {
    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
        catchers![]
    } else {
        catchers![admin_login]
    }
}

static DB_TYPE: Lazy<&str> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| match t {
@@ -83,21 +91,12 @@ const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";

const BASE_TEMPLATE: &str = "admin/base";

const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";

fn admin_path() -> String {
    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
}

struct Referer(Option<String>);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for Referer {
    type Error = ();

    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
    }
}

#[derive(Debug)]
struct IpHeader(Option<String>);
@@ -120,35 +119,36 @@ impl<'r> FromRequest<'r> for IpHeader {
    }
}

/// Used for `Location` response headers, which must specify an absolute URI
/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
fn admin_url(referer: Referer) -> String {
    // If we get a referer use that to make it work when, DOMAIN is not set
    if let Some(mut referer) = referer.0 {
        if let Some(start_index) = referer.find(ADMIN_PATH) {
            referer.truncate(start_index + ADMIN_PATH.len());
            return referer;
        }
    }

    if CONFIG.domain_set() {
        // Don't use CONFIG.domain() directly, since the user may want to keep a
        // trailing slash there, particularly when running under a subpath.
        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
    } else {
        // Last case, when no referer or domain set, technically invalid but better than nothing
        ADMIN_PATH.to_string()
    }
fn admin_url() -> String {
    format!("{}{}", CONFIG.domain_origin(), admin_path())
}

#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage<'_>>) -> ApiResult<Html<String>> {
#[derive(Responder)]
enum AdminResponse {
    #[response(status = 200)]
    Ok(ApiResult<Html<String>>),
    #[response(status = 401)]
    Unauthorized(ApiResult<Html<String>>),
    #[response(status = 429)]
    TooManyRequests(ApiResult<Html<String>>),
}

#[catch(401)]
fn admin_login(request: &Request<'_>) -> ApiResult<Html<String>> {
    if request.format() == Some(&MediaType::JSON) {
        err_code!("Authorization failed.", Status::Unauthorized.code);
    }
    let redirect = request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
    render_admin_login(None, Some(redirect))
}

fn render_admin_login(msg: Option<&str>, redirect: Option<String>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.kind(), msg.message()));
    let msg = msg.map(|msg| format!("Error: {msg}"));
    let json = json!({
        "page_content": "admin/login",
        "version": VERSION,
        "error": msg,
        "redirect": redirect,
        "urlpath": CONFIG.domain_path()
    });

@@ -160,25 +160,25 @@ fn admin_login(flash: Option<FlashMessage<'_>>) -> ApiResult<Html<String>> {
#[derive(FromForm)]
struct LoginForm {
    token: String,
    redirect: Option<String>,
}

#[post("/", data = "<data>")]
fn post_admin_login(
    data: Form<LoginForm>,
    cookies: &CookieJar<'_>,
    ip: ClientIp,
    referer: Referer,
) -> Result<Redirect, Flash<Redirect>> {
fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
    let data = data.into_inner();
    let redirect = data.redirect;

    if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
        return Err(Flash::error(Redirect::to(admin_url(referer)), "Too many requests, try again later."));
        return Err(AdminResponse::TooManyRequests(render_admin_login(
            Some("Too many requests, try again later."),
            redirect,
        )));
    }

    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
        Err(AdminResponse::Unauthorized(render_admin_login(Some("Invalid admin token, please try again."), redirect)))
    } else {
        // If the token received is valid, generate JWT and save it as a cookie
        let claims = generate_admin_claims();
@@ -186,19 +186,36 @@ fn post_admin_login(

        let cookie = Cookie::build(COOKIE_NAME, jwt)
            .path(admin_path())
            .max_age(rocket::time::Duration::minutes(20))
            .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
            .same_site(SameSite::Strict)
            .http_only(true)
            .finish();

        cookies.add(cookie);
        Ok(Redirect::to(admin_url(referer)))
        if let Some(redirect) = redirect {
            Ok(Redirect::to(format!("{}{}", admin_path(), redirect)))
        } else {
            Err(AdminResponse::Ok(render_admin_page()))
        }
    }
}

fn _validate_token(token: &str) -> bool {
    match CONFIG.admin_token().as_ref() {
        None => false,
        Some(t) if t.starts_with("$argon2") => {
            use argon2::password_hash::PasswordVerifier;
            match argon2::password_hash::PasswordHash::new(t) {
                Ok(h) => {
                    // NOTE: hash params from `ADMIN_TOKEN` are used instead of what is configured in the `Argon2` instance.
                    argon2::Argon2::default().verify_password(token.trim().as_ref(), &h).is_ok()
                }
                Err(e) => {
                    error!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: {e}");
                    false
                }
            }
        }
        Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
    }
}
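The `$argon2` branch above means `ADMIN_TOKEN` may hold an Argon2 PHC string instead of a plaintext token. A minimal sketch of producing such a string with the `argon2` crate (the crate version and the token literal are assumptions; any PHC-producing tool works):

```rust
// Sketch: generate an Argon2 PHC string for use as `ADMIN_TOKEN`.
// Assumes the `argon2` crate (0.5.x); "my-admin-token" is a placeholder.
use argon2::{
    password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
    Argon2,
};

fn main() {
    let salt = SaltString::generate(&mut OsRng);
    let phc = Argon2::default()
        .hash_password(b"my-admin-token", &salt)
        .expect("hashing failed")
        .to_string();
    // Prints something like: $argon2id$v=19$m=19456,t=2,p=1$...$...
    println!("{phc}");
}
```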
@@ -206,34 +223,16 @@ fn _validate_token(token: &str) -> bool {
#[derive(Serialize)]
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    page_data: Option<Value>,
    config: Value,
    can_backup: bool,
    logged_in: bool,
    urlpath: String,
}

impl AdminTemplateData {
    fn new() -> Self {
        Self {
            page_content: String::from("admin/settings"),
            version: VERSION,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            page_data: None,
        }
    }

    fn with_data(page_content: &str, page_data: Value) -> Self {
    fn new(page_content: &str, page_data: Value) -> Self {
        Self {
            page_content: String::from(page_content),
            version: VERSION,
            page_data: Some(page_data),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
        }
@@ -244,19 +243,32 @@ impl AdminTemplateData {
    }
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
    let text = AdminTemplateData::new().render()?;
fn render_admin_page() -> ApiResult<Html<String>> {
    let settings_json = json!({
        "config": CONFIG.prepare_json(),
        "can_backup": *CAN_BACKUP,
    });
    let text = AdminTemplateData::new("admin/settings", settings_json).render()?;
    Ok(Html(text))
}

#[get("/")]
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
    render_admin_page()
}

#[get("/", rank = 2)]
fn admin_page_login() -> ApiResult<Html<String>> {
    render_admin_login(None, None)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct InviteData {
    email: String,
}

async fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
    if let Some(user) = User::find_by_uuid(uuid, conn).await {
        Ok(user)
    } else {
@@ -265,28 +277,28 @@ async fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
}

#[post("/invite", data = "<data>")]
async fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
    let data: InviteData = data.into_inner();
    let email = data.email.clone();
    if User::find_by_mail(&data.email, &conn).await.is_some() {
    if User::find_by_mail(&data.email, &mut conn).await.is_some() {
        err_code!("User already exists", Status::Conflict.code)
    }

    let mut user = User::new(email);

    async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult {
    async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
        if CONFIG.mail_enabled() {
            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
        } else {
            let invitation = Invitation::new(user.email.clone());
            let invitation = Invitation::new(&user.email);
            invitation.save(conn).await
        }
    }

    _generate_invite(&user, &conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
    user.save(&conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
    _generate_invite(&user, &mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
    user.save(&mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;

    Ok(Json(user.to_json(&conn).await))
    Ok(Json(user.to_json(&mut conn).await))
}

#[post("/test/smtp", data = "<data>")]
@@ -301,99 +313,159 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
}

#[get("/logout")]
fn logout(cookies: &CookieJar<'_>, referer: Referer) -> Redirect {
fn logout(cookies: &CookieJar<'_>) -> Redirect {
    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
    Redirect::to(admin_url(referer))
    Redirect::to(admin_path())
}

#[get("/users")]
async fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
    let users_json = stream::iter(User::get_all(&conn).await)
        .then(|u| async {
            let u = u; // Move out this single variable
            let mut usr = u.to_json(&conn).await;
            usr["UserEnabled"] = json!(u.enabled);
            usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
            usr
        })
        .collect::<Vec<Value>>()
        .await;
async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
    let users = User::get_all(&mut conn).await;
    let mut users_json = Vec::with_capacity(users.len());
    for u in users {
        let mut usr = u.to_json(&mut conn).await;
        usr["UserEnabled"] = json!(u.enabled);
        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        users_json.push(usr);
    }

    Json(Value::Array(users_json))
}

#[get("/users/overview")]
async fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users_json = stream::iter(User::get_all(&conn).await)
        .then(|u| async {
            let u = u; // Move out this single variable
            let mut usr = u.to_json(&conn).await;
            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn).await);
            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn).await);
            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn).await as i32));
            usr["user_enabled"] = json!(u.enabled);
            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
            usr["last_active"] = match u.last_active(&conn).await {
                Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
                None => json!("Never"),
            };
            usr
        })
        .collect::<Vec<Value>>()
        .await;
async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&mut conn).await;
    let mut users_json = Vec::with_capacity(users.len());
    for u in users {
        let mut usr = u.to_json(&mut conn).await;
        usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
        usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
        usr["user_enabled"] = json!(u.enabled);
        usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        usr["last_active"] = match u.last_active(&mut conn).await {
            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
            None => json!("Never"),
        };
        users_json.push(usr);
    }

    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
    let text = AdminTemplateData::new("admin/users", json!(users_json)).render()?;
    Ok(Html(text))
}

#[get("/users/by-mail/<mail>")]
|
||||
async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||
if let Some(u) = User::find_by_mail(mail, &mut conn).await {
|
||||
let mut usr = u.to_json(&mut conn).await;
|
||||
usr["UserEnabled"] = json!(u.enabled);
|
||||
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||
Ok(Json(usr))
|
||||
} else {
|
||||
err_code!("User doesn't exist", Status::NotFound.code);
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/users/<uuid>")]
|
||||
async fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
|
||||
let u = get_user_or_404(&uuid, &conn).await?;
|
||||
let mut usr = u.to_json(&conn).await;
|
||||
async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||
let u = get_user_or_404(uuid, &mut conn).await?;
|
||||
let mut usr = u.to_json(&mut conn).await;
|
||||
usr["UserEnabled"] = json!(u.enabled);
|
||||
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||
Ok(Json(usr))
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/delete")]
|
||||
async fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let user = get_user_or_404(&uuid, &conn).await?;
|
||||
user.delete(&conn).await
|
||||
async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let user = get_user_or_404(uuid, &mut conn).await?;
|
||||
|
||||
// Get the user_org records before deleting the actual user
|
||||
let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
|
||||
let res = user.delete(&mut conn).await;
|
||||
|
||||
for user_org in user_orgs {
|
||||
log_event(
|
||||
EventType::OrganizationUserRemoved as i32,
|
||||
&user_org.uuid,
|
||||
&user_org.org_uuid,
|
||||
String::from(ACTING_ADMIN_USER),
|
||||
14, // Use UnknownBrowser type
|
||||
&token.ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
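`delete_user` now writes an `EventType::OrganizationUserRemoved` entry for every membership of the deleted user, with device type `14` (UnknownBrowser) since no client device is involved. For orientation, a hypothetical fragment of such an enum; the numeric values follow Bitwarden's public event-type numbering and are assumptions here, not taken from this diff:

```rust
// Hypothetical fragment; discriminants assumed from Bitwarden's event-type numbering.
#[derive(Clone, Copy, Debug)]
pub enum EventType {
    OrganizationUserInvited = 1500,
    OrganizationUserConfirmed = 1501,
    OrganizationUserUpdated = 1502,
    OrganizationUserRemoved = 1503,
}

// Stored as a plain integer, as in `log_event(EventType::OrganizationUserRemoved as i32, ...)`.
```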
#[post("/users/<uuid>/deauth")]
|
||||
async fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn).await?;
|
||||
Device::delete_all_by_user(&user.uuid, &conn).await?;
|
||||
async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
|
||||
nt.send_logout(&user, None).await;
|
||||
|
||||
if CONFIG.push_enabled() {
|
||||
for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
|
||||
match unregister_push_device(device.uuid).await {
|
||||
Ok(r) => r,
|
||||
Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.reset_security_stamp();
|
||||
|
||||
user.save(&conn).await
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/disable")]
|
||||
async fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn).await?;
|
||||
Device::delete_all_by_user(&user.uuid, &conn).await?;
|
||||
async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.reset_security_stamp();
|
||||
user.enabled = false;
|
||||
|
||||
user.save(&conn).await
|
||||
let save_result = user.save(&mut conn).await;
|
||||
|
||||
nt.send_logout(&user, None).await;
|
||||
|
||||
save_result
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/enable")]
|
||||
async fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn).await?;
|
||||
async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
user.enabled = true;
|
||||
|
||||
user.save(&conn).await
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/remove-2fa")]
|
||||
async fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn).await?;
|
||||
TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
|
||||
async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.totp_recover = None;
|
||||
user.save(&conn).await
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/invite/resend")]
|
||||
async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
|
||||
//TODO: replace this with user.status check when it will be available (PR#3397)
|
||||
if !user.password_hash.is_empty() {
|
||||
err_code!("User already accepted invitation", Status::BadRequest.code);
|
||||
}
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
} else {
|
||||
err_code!("User doesn't exist", Status::NotFound.code);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
@@ -404,13 +476,14 @@ struct UserOrgTypeData {
}

#[post("/users/org_type", data = "<data>")]
async fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let data: UserOrgTypeData = data.into_inner();

    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn).await {
        Some(user) => user,
        None => err!("The specified user isn't member of the organization"),
    };
    let mut user_to_edit =
        match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
            Some(user) => user,
            None => err!("The specified user isn't member of the organization"),
        };

    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
        Some(new_type) => new_type as i32,
@@ -418,47 +491,70 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, c
    };

    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
        // Removing owner permmission, check that there are at least another owner
        let num_owners =
            UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).await.len();

        if num_owners <= 1 {
        // Removing owner permission, check that there is at least one other confirmed owner
        if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
            err!("Can't change the type of the last owner")
        }
    }

    // This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
    // It returns different error messages per function.
    if new_type < UserOrgType::Admin {
        match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
            Ok(_) => {}
            Err(OrgPolicyErr::TwoFactorMissing) => {
                err!("You cannot modify this user to this type because it has no two-step login method activated");
            }
            Err(OrgPolicyErr::SingleOrgEnforced) => {
                err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
            }
        }
    }

    log_event(
        EventType::OrganizationUserUpdated as i32,
        &user_to_edit.uuid,
        &data.org_uuid,
        String::from(ACTING_ADMIN_USER),
        14, // Use UnknownBrowser type
        &token.ip.ip,
        &mut conn,
    )
    .await;

    user_to_edit.atype = new_type;
    user_to_edit.save(&conn).await
    user_to_edit.save(&mut conn).await
}

#[post("/users/update_revision")]
async fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&conn).await
async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&mut conn).await
}

#[get("/organizations/overview")]
async fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let organizations_json = stream::iter(Organization::get_all(&conn).await)
        .then(|o| async {
            let o = o; //Move out this single variable
            let mut org = o.to_json();
            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn).await);
            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn).await);
            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn).await);
            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn).await as i32));
            org
        })
        .collect::<Vec<Value>>()
        .await;
async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let organizations = Organization::get_all(&mut conn).await;
    let mut organizations_json = Vec::with_capacity(organizations.len());
    for o in organizations {
        let mut org = o.to_json();
        org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
        org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
        org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
        org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
        org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
        organizations_json.push(org);
    }

    let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
    let text = AdminTemplateData::new("admin/organizations", json!(organizations_json)).render()?;
    Ok(Html(text))
}

#[post("/organizations/<uuid>/delete")]
async fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let org = Organization::find_by_uuid(&uuid, &conn).await.map_res("Organization doesn't exist")?;
    org.delete(&conn).await
async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
    org.delete(&mut conn).await
}

#[derive(Deserialize)]
@@ -476,10 +572,20 @@ struct GitCommit {
    sha: String,
}

async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    let github_api = get_reqwest_client();
#[derive(Deserialize)]
struct TimeApi {
    year: u16,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    seconds: u8,
}

    Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    let json_api = get_reqwest_client();

    Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
}

async fn has_http_access() -> bool {
@@ -498,16 +604,14 @@ use cached::proc_macro::cached;
async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
    // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
    if has_http_access {
        info!("Running get_release_info!!");
        (
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
            match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
                .await
            {
                Ok(r) => r.tag_name,
                _ => "-".to_string(),
            },
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
            {
            match get_json_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await {
                Ok(mut c) => {
                    c.sha.truncate(8);
                    c.sha
@@ -519,7 +623,7 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
            if running_within_docker {
                "-".to_string()
            } else {
                match get_github_api::<GitRelease>(
                match get_json_api::<GitRelease>(
                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
                )
                .await
@@ -534,16 +638,34 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
    }
}

async fn get_ntp_time(has_http_access: bool) -> String {
    if has_http_access {
        if let Ok(ntp_time) = get_json_api::<TimeApi>("https://www.timeapi.io/api/Time/current/zone?timeZone=UTC").await
        {
            return format!(
                "{year}-{month:02}-{day:02} {hour:02}:{minute:02}:{seconds:02} UTC",
                year = ntp_time.year,
                month = ntp_time.month,
                day = ntp_time.day,
                hour = ntp_time.hour,
                minute = ntp_time.minute,
                seconds = ntp_time.seconds
            );
        }
    }
    String::from("Unable to fetch NTP time.")
}

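`get_ntp_time` needs only six numeric fields from the timeapi.io payload; serde ignores any extra fields by default. A self-contained sketch of that deserialization (the sample JSON is illustrative, not a captured response):

```rust
// Assumes serde + serde_json; mirrors the TimeApi struct above.
#[derive(serde::Deserialize)]
struct TimeApi {
    year: u16,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    seconds: u8,
}

fn main() {
    let sample = r#"{"year":2023,"month":5,"day":1,"hour":12,"minute":30,"seconds":5,"timeZone":"UTC"}"#;
    let t: TimeApi = serde_json::from_str(sample).unwrap();
    println!("{:04}-{:02}-{:02} {:02}:{:02}:{:02} UTC", t.year, t.month, t.day, t.hour, t.minute, t.seconds);
}
```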
#[get("/diagnostics")]
|
||||
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult<Html<String>> {
|
||||
use chrono::prelude::*;
|
||||
use std::net::ToSocketAddrs;
|
||||
|
||||
// Get current running versions
|
||||
let web_vault_version: WebVaultVersion =
|
||||
match std::fs::read_to_string(&format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
|
||||
match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => match std::fs::read_to_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
|
||||
_ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => WebVaultVersion {
|
||||
version: String::from("Version file missing"),
|
||||
@@ -562,7 +684,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A
    // Check if we are able to resolve DNS entries
    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
        Ok(Some(a)) => a.ip().to_string(),
        _ => "Could not resolve domain name.".to_string(),
        _ => "Unable to resolve domain name.".to_string(),
    };

    let (latest_release, latest_commit, latest_web_build) =
@@ -575,13 +697,14 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A

    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "current_release": VERSION,
        "latest_release": latest_release,
        "latest_commit": latest_commit,
        "web_vault_enabled": &CONFIG.web_vault_enabled(),
        "web_vault_version": web_vault_version.version,
        "web_vault_version": web_vault_version.version.trim_start_matches('v'),
        "latest_web_build": latest_web_build,
        "running_within_docker": running_within_docker,
        "docker_base_image": docker_base_image(),
        "docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
        "has_http_access": has_http_access,
        "ip_header_exists": &ip_header.0.is_some(),
        "ip_header_match": ip_header_name == CONFIG.ip_header(),
@@ -589,14 +712,17 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A
        "ip_header_config": &CONFIG.ip_header(),
        "uses_proxy": uses_proxy,
        "db_type": *DB_TYPE,
        "db_version": get_sql_server_version(&conn).await,
        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
        "db_version": get_sql_server_version(&mut conn).await,
        "admin_url": format!("{}/diagnostics", admin_url()),
        "overrides": &CONFIG.get_overrides().join(", "),
        "host_arch": std::env::consts::ARCH,
        "host_os": std::env::consts::OS,
        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
        "ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
    });

    let text = AdminTemplateData::with_data("admin/diagnostics", diagnostics_json).render()?;
    let text = AdminTemplateData::new("admin/diagnostics", diagnostics_json).render()?;
    Ok(Html(text))
}

@@ -618,44 +744,60 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
}

#[post("/config/backup_db")]
async fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
    if *CAN_BACKUP {
        backup_database(&conn).await
        backup_database(&mut conn).await
    } else {
        err!("Can't back up current DB (Only SQLite supports this feature)");
    }
}

pub struct AdminToken {}
pub struct AdminToken {
    ip: ClientIp,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminToken {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => err_handler!("Error getting Client IP"),
        };

        if CONFIG.disable_admin_token() {
            Outcome::Success(AdminToken {})
            Outcome::Success(Self {
                ip,
            })
        } else {
            let cookies = request.cookies();

            let access_token = match cookies.get(COOKIE_NAME) {
                Some(cookie) => cookie.value(),
                None => return Outcome::Forward(()), // If there is no cookie, redirect to login
            };

            let ip = match ClientIp::from_request(request).await {
                Outcome::Success(ip) => ip.ip,
                _ => err_handler!("Error getting Client IP"),
                None => {
                    let requested_page =
                        request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
                    // When the requested page is empty, it is `/admin`; in that case, Forward so it will render the login page
                    // Else, return a 401 failure, which will be caught
                    if requested_page.is_empty() {
                        return Outcome::Forward(Status::Unauthorized);
                    } else {
                        return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
                    }
                }
            };

            if decode_admin(access_token).is_err() {
                // Remove admin cookie
                cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
                error!("Invalid or expired admin JWT. IP: {}.", ip);
                return Outcome::Forward(());
                error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
                return Outcome::Failure((Status::Unauthorized, "Session expired"));
            }

            Outcome::Success(AdminToken {})
            Outcome::Success(Self {
                ip,
            })
        }
    }
}
@@ -3,13 +3,21 @@ use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
    api::{
        core::log_user_event, register_push_device, unregister_push_device, EmptyResult, JsonResult, JsonUpcase,
        Notify, NumberOrString, PasswordData, UpdateType,
    },
    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
};

use rocket::{
    http::Status,
    request::{FromRequest, Outcome, Request},
};

pub fn routes() -> Vec<rocket::Route> {
    routes![
        register,
@@ -28,6 +36,7 @@ pub fn routes() -> Vec<rocket::Route> {
        post_verify_email_token,
        post_delete_recover,
        post_delete_recover_token,
        post_device_token,
        delete_account,
        post_delete_account,
        revision_date,
@@ -36,15 +45,23 @@ pub fn routes() -> Vec<rocket::Route> {
        verify_password,
        api_key,
        rotate_api_key,
        get_known_device,
        get_known_device_from_path,
        put_avatar,
        put_device_token,
        put_clear_device_token,
        post_clear_device_token,
    ]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct RegisterData {
pub struct RegisterData {
    Email: String,
    Kdf: Option<i32>,
    KdfIterations: Option<i32>,
    KdfMemory: Option<i32>,
    KdfParallelism: Option<i32>,
    Key: String,
    Keys: Option<KeysData>,
    MasterPasswordHash: String,
@@ -81,7 +98,11 @@ fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult
}

#[post("/accounts/register", data = "<data>")]
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
    _register(data, conn).await
}

pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> JsonResult {
    let data: RegisterData = data.into_inner().data;
    let email = data.Email.to_lowercase();

@@ -98,33 +119,34 @@ async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    let password_hint = clean_password_hint(&data.MasterPasswordHint);
    enforce_password_hint_setting(&password_hint)?;

    let mut user = match User::find_by_mail(&email, &conn).await {
        Some(user) => {
    let mut verified_by_invite = false;

    let mut user = match User::find_by_mail(&email, &mut conn).await {
        Some(mut user) => {
            if !user.password_hash.is_empty() {
                if CONFIG.is_signup_allowed(&email) {
                    err!("User already exists")
                } else {
                    err!("Registration not allowed or user already exists")
                }
                err!("Registration not allowed or user already exists")
            }

            if let Some(token) = data.Token {
                let claims = decode_invite(&token)?;
                if claims.email == email {
                    // Verify the email address when signing up via a valid invite token
                    verified_by_invite = true;
                    user.verified_at = Some(Utc::now().naive_utc());
                    user
                } else {
                    err!("Registration email does not match invite email")
                }
            } else if Invitation::take(&email, &conn).await {
                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).await.iter_mut() {
            } else if Invitation::take(&email, &mut conn).await {
                for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
                    user_org.status = UserOrgStatus::Accepted as i32;
                    user_org.save(&conn).await?;
                    user_org.save(&mut conn).await?;
                }
                user
            } else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).await.is_some() {
            } else if CONFIG.is_signup_allowed(&email)
                || EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
            {
                user
            } else if CONFIG.is_signup_allowed(&email) {
                err!("Account with this email already exists")
            } else {
                err!("Registration not allowed or user already exists")
            }
@@ -133,7 +155,7 @@ async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
            // Order is important here; the invitation check must come first
            // because the vaultwarden admin can invite anyone, regardless
            // of other signup restrictions.
            if Invitation::take(&email, &conn).await || CONFIG.is_signup_allowed(&email) {
            if Invitation::take(&email, &mut conn).await || CONFIG.is_signup_allowed(&email) {
                User::new(email.clone())
            } else {
                err!("Registration not allowed or user already exists")
@@ -142,18 +164,20 @@ async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    };

    // Make sure we don't leave a lingering invitation.
    Invitation::take(&email, &conn).await;

    if let Some(client_kdf_iter) = data.KdfIterations {
        user.client_kdf_iter = client_kdf_iter;
    }
    Invitation::take(&email, &mut conn).await;

    if let Some(client_kdf_type) = data.Kdf {
        user.client_kdf_type = client_kdf_type;
    }

    user.set_password(&data.MasterPasswordHash, None);
    user.akey = data.Key;
    if let Some(client_kdf_iter) = data.KdfIterations {
        user.client_kdf_iter = client_kdf_iter;
    }

    user.client_kdf_memory = data.KdfMemory;
    user.client_kdf_parallelism = data.KdfParallelism;

    user.set_password(&data.MasterPasswordHash, Some(data.Key), true, None);
    user.password_hint = password_hint;

    // Add extra fields if present
@@ -167,7 +191,7 @@ async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    }

    if CONFIG.mail_enabled() {
        if CONFIG.signups_verify() {
        if CONFIG.signups_verify() && !verified_by_invite {
            if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
                error!("Error sending welcome email: {:#?}", e);
            }
@@ -178,20 +202,23 @@ async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
        }
    }

    user.save(&conn).await
    user.save(&mut conn).await?;
    Ok(Json(json!({
        "Object": "register",
        "CaptchaBypassToken": "",
    })))
}

#[get("/accounts/profile")]
async fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
    Json(headers.user.to_json(&conn).await)
async fn profile(headers: Headers, mut conn: DbConn) -> Json<Value> {
    Json(headers.user.to_json(&mut conn).await)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct ProfileData {
    #[serde(rename = "Culture")]
    _Culture: String, // Ignored, always use en-US
    MasterPasswordHint: Option<String>,
    // Culture: String, // Ignored, always use en-US
    // MasterPasswordHint: Option<String>, // Ignored, has been moved to ChangePassData
    Name: String,
}

@@ -201,7 +228,7 @@ async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbCo
}

#[post("/accounts/profile", data = "<data>")]
async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: ProfileData = data.into_inner().data;

    // Check if the length of the username exceeds 50 characters (Same as Upstream Bitwarden)
@@ -212,16 +239,40 @@ async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbC

    let mut user = headers.user;
    user.name = data.Name;
    user.password_hint = clean_password_hint(&data.MasterPasswordHint);
    enforce_password_hint_setting(&user.password_hint)?;

    user.save(&conn).await?;
    Ok(Json(user.to_json(&conn).await))
    user.save(&mut conn).await?;
    Ok(Json(user.to_json(&mut conn).await))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct AvatarData {
    AvatarColor: Option<String>,
}

#[put("/accounts/avatar", data = "<data>")]
async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: AvatarData = data.into_inner().data;

    // It looks like it only supports the 6-digit hex color format.
    // If you try to add the short value it will not show that color.
    // Check and force 7 chars, including the #.
    if let Some(color) = &data.AvatarColor {
        if color.len() != 7 {
            err!("The field AvatarColor must be a HTML/Hex color code with a length of 7 characters")
        }
    }

    let mut user = headers.user;
    user.avatar_color = data.AvatarColor;

    user.save(&mut conn).await?;
    Ok(Json(user.to_json(&mut conn).await))
}

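The avatar check above only tests the string length, so any 7-character value passes. A stricter validator would also pin the leading `#` and the hex digits; a small sketch (not what this diff implements):

```rust
// Stricter than the length-only check in `put_avatar`; a sketch, not the diff's code.
fn is_hex_color(s: &str) -> bool {
    s.len() == 7 && s.starts_with('#') && s[1..].chars().all(|c| c.is_ascii_hexdigit())
}

// is_hex_color("#ff8800") == true
// is_hex_color("#ff880")  == false (too short)
// is_hex_color("zff8800") == false (no leading '#')
```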
#[get("/users/<uuid>/public-key")]
|
||||
async fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
|
||||
let user = match User::find_by_uuid(&uuid, &conn).await {
|
||||
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let user = match User::find_by_uuid(uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("User doesn't exist"),
|
||||
};
|
||||
@@ -234,7 +285,7 @@ async fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonR
}

#[post("/accounts/keys", data = "<data>")]
async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: KeysData = data.into_inner().data;

    let mut user = headers.user;
@@ -242,7 +293,7 @@ async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -
    user.private_key = Some(data.EncryptedPrivateKey);
    user.public_key = Some(data.PublicKey);

    user.save(&conn).await?;
    user.save(&mut conn).await?;

    Ok(Json(json!({
        "PrivateKey": user.private_key,
@@ -256,11 +307,17 @@ async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -
struct ChangePassData {
    MasterPasswordHash: String,
    NewMasterPasswordHash: String,
    MasterPasswordHint: Option<String>,
    Key: String,
}

#[post("/accounts/password", data = "<data>")]
async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_password(
    data: JsonUpcase<ChangePassData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: ChangePassData = data.into_inner().data;
    let mut user = headers.user;

@@ -268,12 +325,27 @@ async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn:
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
user.password_hint = clean_password_hint(&data.MasterPasswordHint);
|
||||
enforce_password_hint_setting(&user.password_hint)?;
|
||||
|
||||
log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn)
|
||||
.await;
|
||||
|
||||
user.set_password(
|
||||
&data.NewMasterPasswordHash,
|
||||
Some(data.Key),
|
||||
true,
|
||||
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
|
||||
);
|
||||
user.akey = data.Key;
|
||||
user.save(&conn).await
|
||||
|
||||
let save_result = user.save(&mut conn).await;
|
||||
|
||||
// Prevent loging out the client where the user requested this endpoint from.
|
||||
// If you do logout the user it will causes issues at the client side.
|
||||
// Adding the device uuid will prevent this.
|
||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||
|
||||
save_result
|
||||
}
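Note the ordering the new handler establishes: save first, broadcast the logout afterwards, with the requesting device excluded. A minimal sketch of that pattern, reusing the types visible in this diff (the `save` and `send_logout` signatures are assumed from how they are called here, not verified against the codebase):

```rust
// Illustrative helper mirroring the handler above: persist first, then
// notify other sessions, skipping the device that issued the request so
// its session survives the password change.
async fn save_then_logout_others(
    user: &mut User,
    requesting_device_uuid: String,
    conn: &mut DbConn,
    nt: &Notify<'_>,
) -> EmptyResult {
    // Save the new credentials before telling other sessions to re-authenticate.
    let save_result = user.save(conn).await;
    // Some(uuid) excludes that device from the logout broadcast.
    nt.send_logout(user, Some(requesting_device_uuid)).await;
    save_result
}
```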

#[derive(Deserialize)]
@@ -281,6 +353,8 @@ async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn:
struct ChangeKdfData {
    Kdf: i32,
    KdfIterations: i32,
    KdfMemory: Option<i32>,
    KdfParallelism: Option<i32>,

    MasterPasswordHash: String,
    NewMasterPasswordHash: String,
@@ -288,7 +362,7 @@ struct ChangeKdfData {
}

#[post("/accounts/kdf", data = "<data>")]
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let data: ChangeKdfData = data.into_inner().data;
    let mut user = headers.user;

@@ -296,11 +370,42 @@ async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbCon
        err!("Invalid password")
    }

    if data.Kdf == UserKdfType::Pbkdf2 as i32 && data.KdfIterations < 100_000 {
        err!("PBKDF2 KDF iterations must be at least 100000.")
    }

    if data.Kdf == UserKdfType::Argon2id as i32 {
        if data.KdfIterations < 1 {
            err!("Argon2 KDF iterations must be at least 1.")
        }
        if let Some(m) = data.KdfMemory {
            if !(15..=1024).contains(&m) {
                err!("Argon2 memory must be between 15 MB and 1024 MB.")
            }
            user.client_kdf_memory = data.KdfMemory;
        } else {
            err!("Argon2 memory parameter is required.")
        }
        if let Some(p) = data.KdfParallelism {
            if !(1..=16).contains(&p) {
                err!("Argon2 parallelism must be between 1 and 16.")
            }
            user.client_kdf_parallelism = data.KdfParallelism;
        } else {
            err!("Argon2 parallelism parameter is required.")
        }
    } else {
        user.client_kdf_memory = None;
        user.client_kdf_parallelism = None;
    }
    user.client_kdf_iter = data.KdfIterations;
    user.client_kdf_type = data.Kdf;
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;
    user.save(&conn).await
    user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, Some(headers.device.uuid)).await;

    save_result
}
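The accepted ranges above (PBKDF2: at least 100,000 iterations; Argon2id: iterations at least 1, memory 15 to 1024 MB, parallelism 1 to 16) can be restated as a standalone validator. A sketch with an illustrative enum; only the bounds are taken from the diff:

```rust
enum KdfType {
    Pbkdf2,
    Argon2id,
}

/// Re-statement of the bounds enforced by `post_kdf` above, in isolation.
fn validate_kdf_settings(
    kdf: KdfType,
    iterations: i32,
    memory: Option<i32>,
    parallelism: Option<i32>,
) -> Result<(), &'static str> {
    match kdf {
        KdfType::Pbkdf2 if iterations < 100_000 => Err("PBKDF2 KDF iterations must be at least 100000."),
        KdfType::Pbkdf2 => Ok(()),
        KdfType::Argon2id => {
            if iterations < 1 {
                return Err("Argon2 KDF iterations must be at least 1.");
            }
            match memory {
                None => return Err("Argon2 memory parameter is required."),
                Some(m) if !(15..=1024).contains(&m) => return Err("Argon2 memory must be between 15 MB and 1024 MB."),
                Some(_) => {}
            }
            match parallelism {
                None => return Err("Argon2 parallelism parameter is required."),
                Some(p) if !(1..=16).contains(&p) => return Err("Argon2 parallelism must be between 1 and 16."),
                Some(_) => {}
            }
            Ok(())
        }
    }
}
```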

#[derive(Deserialize)]
@@ -323,18 +428,24 @@ struct KeyData {
}

#[post("/accounts/key", data = "<data>")]
async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let data: KeyData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password")
    }

    // Validate the import before continuing.
    // Bitwarden does not process the import if a single item is invalid.
    // Since we check the length of the encrypted notes, we need to do that here to pre-validate them.
    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
    Cipher::validate_notes(&data.Ciphers)?;

    let user_uuid = &headers.user.uuid;

    // Update folder data
    for folder_data in data.Folders {
        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn).await {
        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &mut conn).await {
            Some(folder) => folder,
            None => err!("Folder doesn't exist"),
        };
@@ -344,14 +455,14 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbCon
        }

        saved_folder.name = folder_data.Name;
        saved_folder.save(&conn).await?
        saved_folder.save(&mut conn).await?
    }

    // Update cipher data
    use super::ciphers::update_cipher_from_data;

    for cipher_data in data.Ciphers {
        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn).await {
        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
            Some(cipher) => cipher,
            None => err!("Cipher doesn't exist"),
        };
@@ -362,7 +473,9 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbCon

        // Prevent triggering cipher updates via WebSockets by setting UpdateType::None.
        // The user sessions are invalidated because all the ciphers were re-encrypted, so triggering an update could cause issues.
        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None).await?
        // We force the users to log out after the user has been saved, to try to prevent these issues.
        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
            .await?
    }

    // Update user data
@@ -372,11 +485,23 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbCon
    user.private_key = Some(data.PrivateKey);
    user.reset_security_stamp();

    user.save(&conn).await
    let save_result = user.save(&mut conn).await;

    // Prevent logging out the client from which the user requested this endpoint.
    // Logging that client out as well would cause issues on the client side.
    // Passing the device uuid prevents this.
    nt.send_logout(&user, Some(headers.device.uuid)).await;

    save_result
}
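Because several steps interact here, the overall shape of the rotation is easier to see compressed into an outline (a reading aid for the hunks above, not a drop-in implementation):

```rust
// Outline of post_rotatekey above, for orientation only:
//
// 1. check_valid_password(MasterPasswordHash)   -- reject bad auth before touching data
// 2. Cipher::validate_notes(&data.Ciphers)?     -- pre-validate the whole import, all-or-nothing
// 3. for each folder:  find it, save the re-encrypted name
// 4. for each cipher:  update_cipher_from_data(..., UpdateType::None)
//       UpdateType::None suppresses WebSocket pushes; every other session's
//       key is about to become invalid anyway
// 5. user keys get the new values, reset_security_stamp()
// 6. save, then nt.send_logout(&user, Some(device_uuid)) so only other sessions drop
```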

#[post("/accounts/security-stamp", data = "<data>")]
async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_sstamp(
    data: JsonUpcase<PasswordData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: PasswordData = data.into_inner().data;
    let mut user = headers.user;

@@ -384,9 +509,13 @@ async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbC
        err!("Invalid password")
    }

    Device::delete_all_by_user(&user.uuid, &conn).await?;
    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();
    user.save(&conn).await
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, None).await;

    save_result
}

#[derive(Deserialize)]
@@ -397,7 +526,7 @@ struct EmailTokenData {
}

#[post("/accounts/email-token", data = "<data>")]
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: EmailTokenData = data.into_inner().data;
    let mut user = headers.user;

@@ -405,7 +534,7 @@ async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, co
        err!("Invalid password")
    }

    if User::find_by_mail(&data.NewEmail, &conn).await.is_some() {
    if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
        err!("Email already in use");
    }

@@ -423,7 +552,7 @@ async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, co

    user.email_new = Some(data.NewEmail);
    user.email_new_token = Some(token);
    user.save(&conn).await
    user.save(&mut conn).await
}

#[derive(Deserialize)]
@@ -438,7 +567,12 @@ struct ChangeEmailData {
}

#[post("/accounts/email", data = "<data>")]
async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email(
    data: JsonUpcase<ChangeEmailData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: ChangeEmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -446,7 +580,7 @@ async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: D
        err!("Invalid password")
    }

    if User::find_by_mail(&data.NewEmail, &conn).await.is_some() {
    if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
        err!("Email already in use");
    }

@@ -478,10 +612,13 @@ async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: D
    user.email_new = None;
    user.email_new_token = None;

    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;
    user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);

    user.save(&conn).await
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, None).await;

    save_result
}

#[post("/accounts/verify-email")]
@@ -507,10 +644,10 @@ struct VerifyEmailTokenData {
}

#[post("/accounts/verify-email-token", data = "<data>")]
async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult {
    let data: VerifyEmailTokenData = data.into_inner().data;

    let mut user = match User::find_by_uuid(&data.UserId, &conn).await {
    let mut user = match User::find_by_uuid(&data.UserId, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -525,7 +662,7 @@ async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: D
    user.verified_at = Some(Utc::now().naive_utc());
    user.last_verifying_at = None;
    user.login_verify_count = 0;
    if let Err(e) = user.save(&conn).await {
    if let Err(e) = user.save(&mut conn).await {
        error!("Error saving email verification: {:#?}", e);
    }

@@ -539,11 +676,11 @@ struct DeleteRecoverData {
}

#[post("/accounts/delete-recover", data = "<data>")]
async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, mut conn: DbConn) -> EmptyResult {
    let data: DeleteRecoverData = data.into_inner().data;

    if CONFIG.mail_enabled() {
        if let Some(user) = User::find_by_mail(&data.Email, &conn).await {
        if let Some(user) = User::find_by_mail(&data.Email, &mut conn).await {
            if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await {
                error!("Error sending delete account email: {:#?}", e);
            }
@@ -566,10 +703,10 @@ struct DeleteRecoverTokenData {
}

#[post("/accounts/delete-recover-token", data = "<data>")]
async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult {
    let data: DeleteRecoverTokenData = data.into_inner().data;

    let user = match User::find_by_uuid(&data.UserId, &conn).await {
    let user = match User::find_by_uuid(&data.UserId, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -581,7 +718,7 @@ async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, con
    if claims.sub != user.uuid {
        err!("Invalid claim");
    }
    user.delete(&conn).await
    user.delete(&mut conn).await
}

#[post("/accounts/delete", data = "<data>")]
@@ -590,7 +727,7 @@ async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, c
}

#[delete("/accounts", data = "<data>")]
async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: PasswordData = data.into_inner().data;
    let user = headers.user;

@@ -598,13 +735,13 @@ async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn:
        err!("Invalid password")
    }

    user.delete(&conn).await
    user.delete(&mut conn).await
}

#[get("/accounts/revision-date")]
fn revision_date(headers: Headers) -> String {
fn revision_date(headers: Headers) -> JsonResult {
    let revision_date = headers.user.updated_at.timestamp_millis();
    revision_date.to_string()
    Ok(Json(json!(revision_date)))
}

#[derive(Deserialize)]
@@ -614,7 +751,7 @@ struct PasswordHintData {
}

#[post("/accounts/password-hint", data = "<data>")]
async fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
async fn password_hint(data: JsonUpcase<PasswordHintData>, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
        err!("This server is not configured to provide password hints.");
    }
@@ -624,7 +761,7 @@ async fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> Empt
    let data: PasswordHintData = data.into_inner().data;
    let email = &data.Email;

    match User::find_by_mail(email, &conn).await {
    match User::find_by_mail(email, &mut conn).await {
        None => {
            // To prevent user enumeration, act as if the user exists.
            if CONFIG.mail_enabled() {
@@ -647,7 +784,7 @@ async fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> Empt
                mail::send_password_hint(email, hint).await?;
                Ok(())
            } else if let Some(hint) = hint {
                err!(format!("Your password hint is: {}", hint));
                err!(format!("Your password hint is: {hint}"));
            } else {
                err!(NO_HINT);
            }
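The `None` arm above is the interesting one: when mail is enabled, an unknown address gets the same observable response as a known one, so the endpoint cannot be used to enumerate accounts. The pattern distilled, with hypothetical helper names:

```rust
// Hypothetical distillation of the anti-enumeration pattern used above:
// both branches return the same success value; only the side effect differs.
async fn handle_hint_request(known_user_email: Option<String>) -> Result<(), ()> {
    if let Some(email) = known_user_email {
        send_hint_mail(&email).await?; // only real accounts get mail
    }
    // A caller probing random addresses sees an identical response either way.
    Ok(())
}

async fn send_hint_mail(_email: &str) -> Result<(), ()> {
    Ok(()) // stand-in for mail::send_password_hint
}
```

The `{hint}` change in the same hunk is simply the inline format-args syntax (captured identifiers) available since Rust 1.58.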
@@ -666,18 +803,22 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
    _prelogin(data, conn).await
}

pub async fn _prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
    let data: PreloginData = data.into_inner().data;

    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn).await {
        Some(user) => (user.client_kdf_type, user.client_kdf_iter),
        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
    let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => (user.client_kdf_type, user.client_kdf_iter, user.client_kdf_memory, user.client_kdf_parallelism),
        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None),
    };

    Json(json!({
    let result = json!({
        "Kdf": kdf_type,
        "KdfIterations": kdf_iter
    }))
        "KdfIterations": kdf_iter,
        "KdfMemory": kdf_mem,
        "KdfParallelism": kdf_para,
    });

    Json(result)
}
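For an Argon2id account the response now carries all four parameters; for a PBKDF2 account the two new fields serialize as `null`. A sketch of the resulting JSON shape with illustrative values (`1` is assumed to be the Argon2id discriminant, matching upstream Bitwarden's KDF type enum):

```rust
use serde_json::{json, Value};

// Example of the JSON shape `_prelogin` produces after this change.
fn example_prelogin_response() -> Value {
    json!({
        "Kdf": 1,            // assumed Argon2id discriminant; PBKDF2 would be 0
        "KdfIterations": 3,
        "KdfMemory": 64,     // in MB, within the 15..=1024 bound validated earlier
        "KdfParallelism": 4, // within the 1..=16 bound
    })
}
```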

// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
@@ -703,8 +844,10 @@ async fn _api_key(
    data: JsonUpcase<SecretVerificationRequest>,
    rotate: bool,
    headers: Headers,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    use crate::util::format_date;

    let data: SecretVerificationRequest = data.into_inner().data;
    let mut user = headers.user;

@@ -714,11 +857,12 @@ async fn _api_key(

    if rotate || user.api_key.is_none() {
        user.api_key = Some(crypto::generate_api_key());
        user.save(&conn).await.expect("Error saving API key");
        user.save(&mut conn).await.expect("Error saving API key");
    }

    Ok(Json(json!({
        "ApiKey": user.api_key,
        "RevisionDate": format_date(&user.updated_at),
        "Object": "apiKey",
    })))
}
@@ -732,3 +876,123 @@ async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers,
async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
    _api_key(data, true, headers, conn).await
}

// This variant is deprecated: https://github.com/bitwarden/server/pull/2682
#[get("/devices/knowndevice/<email>/<uuid>")]
async fn get_known_device_from_path(email: &str, uuid: &str, mut conn: DbConn) -> JsonResult {
    // This endpoint doesn't have an auth header.
    let mut result = false;
    if let Some(user) = User::find_by_mail(email, &mut conn).await {
        result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn).await.is_some();
    }
    Ok(Json(json!(result)))
}

#[get("/devices/knowndevice")]
async fn get_known_device(device: KnownDevice, conn: DbConn) -> JsonResult {
    get_known_device_from_path(&device.email, &device.uuid, conn).await
}

struct KnownDevice {
    email: String,
    uuid: String,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for KnownDevice {
    type Error = &'static str;

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
            let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
                Ok(bytes) => bytes,
                Err(_) => {
                    return Outcome::Failure((
                        Status::BadRequest,
                        "X-Request-Email value failed to decode as base64url",
                    ));
                }
            };
            match String::from_utf8(email_bytes) {
                Ok(email) => email,
                Err(_) => {
                    return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
                }
            }
        } else {
            return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
        };

        let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
            uuid.to_string()
        } else {
            return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
        };

        Outcome::Success(KnownDevice {
            email,
            uuid,
        })
    }
}
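A client of the new header-based endpoint must encode the email exactly the way the guard decodes it: base64url without padding. A small sketch of producing the two header values, using the same `data_encoding` API the guard itself calls:

```rust
use data_encoding::BASE64URL_NOPAD;

/// Builds the header pairs for `GET /devices/knowndevice` as the
/// `KnownDevice` request guard above expects them.
fn known_device_headers(email: &str, device_uuid: &str) -> [(&'static str, String); 2] {
    [
        // The guard decodes this with BASE64URL_NOPAD, so encode the same way.
        ("X-Request-Email", BASE64URL_NOPAD.encode(email.as_bytes())),
        ("X-Device-Identifier", device_uuid.to_string()),
    ]
}
```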

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PushToken {
    PushToken: String,
}

#[post("/devices/identifier/<uuid>/token", data = "<data>")]
async fn post_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
    put_device_token(uuid, data, headers, conn).await
}

#[put("/devices/identifier/<uuid>/token", data = "<data>")]
async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.push_enabled() {
        return Ok(());
    }

    let data = data.into_inner().data;
    let token = data.PushToken;
    let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
        Some(device) => device,
        None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
    };
    device.push_token = Some(token);
    if device.push_uuid.is_none() {
        device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
    }
    if let Err(e) = device.save(&mut conn).await {
        err!(format!("An error occured while trying to save the device push token: {e}"));
    }
    if let Err(e) = register_push_device(headers.user.uuid, device).await {
        err!(format!("An error occured while proceeding registration of a device: {e}"));
    }

    Ok(())
}

#[put("/devices/identifier/<uuid>/clear-token")]
async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
    // This only clears the push token.
    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
    // This is not implemented in any app at the moment; it is added in case it is ever required.
    if !CONFIG.push_enabled() {
        return Ok(());
    }

    if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
        Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
        unregister_push_device(device.uuid).await?;
    }

    Ok(())
}

// On the upstream server, both PUT and POST are declared. The POST method is implemented in case it is useful somewhere.
#[post("/devices/identifier/<uuid>/clear-token")]
async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
    put_clear_device_token(uuid, conn).await
}

@@ -1,8 +1,6 @@
use chrono::{Duration, Utc};
use rocket::serde::json::Json;
use rocket::Route;
use rocket::{serde::json::Json, Route};
use serde_json::Value;
use std::borrow::Borrow;

use crate::{
    api::{
@@ -14,8 +12,6 @@ use crate::{
    mail, CONFIG,
};

use futures::{stream, stream::StreamExt};

pub fn routes() -> Vec<Route> {
    routes![
        get_contacts,
@@ -41,17 +37,14 @@ pub fn routes() -> Vec<Route> {
// region get

#[get("/emergency-access/trusted")]
async fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let emergency_access_list_json =
        stream::iter(EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn).await)
            .then(|e| async {
                let e = e; // Move out this single variable
                e.to_json_grantee_details(&conn).await
            })
            .collect::<Vec<Value>>()
            .await;
    let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
    let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
    for ea in emergency_access_list {
        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
    }

    Ok(Json(json!({
        "Data": emergency_access_list_json,
@@ -61,17 +54,14 @@ async fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
}
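The refactor above trades the `stream::iter(...).then(...).collect()` chain for a plain loop: with the connection now taken as `&mut`, it cannot be shared across the stream's closures, and a sequential loop with a pre-sized `Vec` is simpler anyway. The two shapes side by side with a toy async function:

```rust
use futures::{stream, StreamExt};

async fn render(item: u32) -> String {
    format!("item {item}")
}

// Before: combinator style; fine while the body only borrows shared data.
async fn collect_with_stream(items: Vec<u32>) -> Vec<String> {
    stream::iter(items).then(render).collect::<Vec<_>>().await
}

// After: the style this diff switches to; a plain loop makes it easy to
// thread a `&mut` resource (like `&mut conn` above) through each iteration.
async fn collect_with_loop(items: Vec<u32>) -> Vec<String> {
    let mut out = Vec::with_capacity(items.len());
    for i in items {
        out.push(render(i).await);
    }
    out
}
```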

#[get("/emergency-access/granted")]
async fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let emergency_access_list_json =
        stream::iter(EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn).await)
            .then(|e| async {
                let e = e; // Move out this single variable
                e.to_json_grantor_details(&conn).await
            })
            .collect::<Vec<Value>>()
            .await;
    let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
    let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
    for ea in emergency_access_list {
        emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
    }

    Ok(Json(json!({
        "Data": emergency_access_list_json,
@@ -81,11 +71,11 @@ async fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
}

#[get("/emergency-access/<emer_id>")]
async fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {
async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn).await)),
    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
        None => err!("Emergency access not valid."),
    }
}
@@ -94,7 +84,7 @@ async fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {

// region put/post

#[derive(Deserialize, Debug)]
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessUpdateData {
    Type: NumberOrString,
@@ -103,25 +93,21 @@ struct EmergencyAccessUpdateData {
}

#[put("/emergency-access/<emer_id>", data = "<data>")]
async fn put_emergency_access(
    emer_id: String,
    data: JsonUpcase<EmergencyAccessUpdateData>,
    conn: DbConn,
) -> JsonResult {
async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
    post_emergency_access(emer_id, data, conn).await
}

#[post("/emergency-access/<emer_id>", data = "<data>")]
async fn post_emergency_access(
    emer_id: String,
    emer_id: &str,
    data: JsonUpcase<EmergencyAccessUpdateData>,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    check_emergency_access_allowed()?;

    let data: EmergencyAccessUpdateData = data.into_inner().data;

    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emergency_access) => emergency_access,
        None => err!("Emergency access not valid."),
    };
@@ -133,9 +119,11 @@ async fn post_emergency_access(

    emergency_access.atype = new_type;
    emergency_access.wait_time_days = data.WaitTimeDays;
    emergency_access.key_encrypted = data.KeyEncrypted;
    if data.KeyEncrypted.is_some() {
        emergency_access.key_encrypted = data.KeyEncrypted;
    }

    emergency_access.save(&conn).await?;
    emergency_access.save(&mut conn).await?;
    Ok(Json(emergency_access.to_json()))
}

@@ -144,12 +132,12 @@ async fn post_emergency_access(
// region delete

#[delete("/emergency-access/<emer_id>")]
async fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
    check_emergency_access_allowed()?;

    let grantor_user = headers.user;

    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => {
            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
                err!("Emergency access not valid.")
@@ -158,12 +146,12 @@ async fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn
        }
        None => err!("Emergency access not valid."),
    };
    emergency_access.delete(&conn).await?;
    emergency_access.delete(&mut conn).await?;
    Ok(())
}

#[post("/emergency-access/<emer_id>/delete")]
async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult {
    delete_emergency_access(emer_id, headers, conn).await
}

@@ -171,7 +159,7 @@ async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: D

// region invite

#[derive(Deserialize, Debug)]
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessInviteData {
    Email: String,
@@ -180,7 +168,7 @@ struct EmergencyAccessInviteData {
}

#[post("/emergency-access/invite", data = "<data>")]
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    check_emergency_access_allowed()?;

    let data: EmergencyAccessInviteData = data.into_inner().data;
@@ -201,10 +189,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
        err!("You can not set yourself as an emergency contact.")
    }

    let grantee_user = match User::find_by_mail(&email, &conn).await {
    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
        None => {
            if !CONFIG.invitations_allowed() {
                err!(format!("Grantee user does not exist: {}", email))
                err!(format!("Grantee user does not exist: {}", &email))
            }

            if !CONFIG.is_email_domain_allowed(&email) {
@@ -212,12 +200,12 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
            }

            if !CONFIG.mail_enabled() {
                let invitation = Invitation::new(email.clone());
                invitation.save(&conn).await?;
                let invitation = Invitation::new(&email);
                invitation.save(&mut conn).await?;
            }

            let mut user = User::new(email.clone());
            user.save(&conn).await?;
            user.save(&mut conn).await?;
            user
        }
        Some(user) => user,
@@ -227,41 +215,34 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
        &grantor_user.uuid,
        &grantee_user.uuid,
        &grantee_user.email,
        &conn,
        &mut conn,
    )
    .await
    .is_some()
    {
        err!(format!("Grantee user already invited: {}", email))
        err!(format!("Grantee user already invited: {}", &grantee_user.email))
    }

    let mut new_emergency_access = EmergencyAccess::new(
        grantor_user.uuid.clone(),
        Some(grantee_user.email.clone()),
        emergency_access_status,
        new_type,
        wait_time_days,
    );
    new_emergency_access.save(&conn).await?;
    let mut new_emergency_access =
        EmergencyAccess::new(grantor_user.uuid, grantee_user.email, emergency_access_status, new_type, wait_time_days);
    new_emergency_access.save(&mut conn).await?;

    if CONFIG.mail_enabled() {
        mail::send_emergency_access_invite(
            &grantee_user.email,
            &new_emergency_access.email.expect("Grantee email does not exists"),
            &grantee_user.uuid,
            Some(new_emergency_access.uuid),
            Some(grantor_user.name.clone()),
            Some(grantor_user.email),
            &new_emergency_access.uuid,
            &grantor_user.name,
            &grantor_user.email,
        )
        .await?;
    } else {
        // Automatically mark the user as accepted when email invites are disabled
        match User::find_by_mail(&email, &conn).await {
            Some(user) => {
                match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()).await {
                    Ok(v) => (v),
                    Err(e) => err!(e.to_string()),
                }
            }
        match User::find_by_mail(&email, &mut conn).await {
            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
                Ok(v) => v,
                Err(e) => err!(e.to_string()),
            },
            None => err!("Grantee user not found."),
        }
    }
@@ -270,10 +251,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
}

#[post("/emergency-access/<emer_id>/reinvite")]
async fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
    check_emergency_access_allowed()?;

    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };
@@ -291,7 +272,7 @@ async fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> Empty
        None => err!("Email not valid."),
    };

    let grantee_user = match User::find_by_mail(&email, &conn).await {
    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
        Some(user) => user,
        None => err!("Grantee user not found."),
    };
@@ -302,22 +283,20 @@ async fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> Empty
        mail::send_emergency_access_invite(
            &email,
            &grantor_user.uuid,
            Some(emergency_access.uuid),
            Some(grantor_user.name.clone()),
            Some(grantor_user.email),
            &emergency_access.uuid,
            &grantor_user.name,
            &grantor_user.email,
        )
        .await?;
    } else {
        if Invitation::find_by_mail(&email, &conn).await.is_none() {
            let invitation = Invitation::new(email);
            invitation.save(&conn).await?;
        if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
            let invitation = Invitation::new(&email);
            invitation.save(&mut conn).await?;
        }

        // Automatically mark the user as accepted when email invites are disabled
        match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow())
            .await
        {
            Ok(v) => (v),
        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
            Ok(v) => v,
            Err(e) => err!(e.to_string()),
        }
    }
@@ -332,38 +311,44 @@ struct AcceptData {
}

#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
async fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult {
async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    check_emergency_access_allowed()?;

    let data: AcceptData = data.into_inner().data;
    let token = &data.Token;
    let claims = decode_emergency_access_invite(token)?;

    let grantee_user = match User::find_by_mail(&claims.email, &conn).await {
    // This can happen if the user who received the invite used a different email to sign up.
    // Since we do not know if this is intended, we error out here and do nothing with the invite.
    if claims.email != headers.user.email {
        err!("Claim email does not match current users email")
    }

    let grantee_user = match User::find_by_mail(&claims.email, &mut conn).await {
        Some(user) => {
            Invitation::take(&claims.email, &conn).await;
            Invitation::take(&claims.email, &mut conn).await;
            user
        }
        None => err!("Invited user not found"),
    };

    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    // get grantor user to send Accepted email
    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    if (claims.emer_id.is_some() && emer_id == claims.emer_id.unwrap())
        && (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap())
        && (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap())
    if emer_id == claims.emer_id
        && grantor_user.name == claims.grantor_name
        && grantor_user.email == claims.grantor_email
    {
        match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn).await {
            Ok(v) => (v),
        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
            Ok(v) => v,
            Err(e) => err!(e.to_string()),
        }

@@ -378,18 +363,12 @@ async fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbCo
}

async fn accept_invite_process(
    grantee_uuid: String,
    emer_id: String,
    email: Option<String>,
    conn: &DbConn,
    grantee_uuid: &str,
    emergency_access: &mut EmergencyAccess,
    grantee_email: &str,
    conn: &mut DbConn,
) -> EmptyResult {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    let emer_email = emergency_access.email;
    if emer_email.is_none() || emer_email != email {
    if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
        err!("User email does not match invite.");
    }

@@ -398,7 +377,7 @@ async fn accept_invite_process(
    }

    emergency_access.status = EmergencyAccessStatus::Accepted as i32;
    emergency_access.grantee_uuid = Some(grantee_uuid);
    emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
    emergency_access.email = None;
    emergency_access.save(conn).await
}
@@ -411,10 +390,10 @@ struct ConfirmData {

#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
async fn confirm_emergency_access(
    emer_id: String,
    emer_id: &str,
    data: JsonUpcase<ConfirmData>,
    headers: Headers,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    check_emergency_access_allowed()?;

@@ -422,7 +401,7 @@ async fn confirm_emergency_access(
    let data: ConfirmData = data.into_inner().data;
    let key = data.Key;

    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };
@@ -433,13 +412,13 @@ async fn confirm_emergency_access(
        err!("Emergency access not valid.")
    }

    let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
        let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
            Some(user) => user,
            None => err!("Grantee user not found."),
        };
@@ -448,7 +427,7 @@ async fn confirm_emergency_access(
        emergency_access.key_encrypted = Some(key);
        emergency_access.email = None;

        emergency_access.save(&conn).await?;
        emergency_access.save(&mut conn).await?;

        if CONFIG.mail_enabled() {
            mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name).await?;
@@ -464,22 +443,22 @@ async fn confirm_emergency_access(
// region access emergency access

#[post("/emergency-access/<emer_id>/initiate")]
async fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let initiating_user = headers.user;
    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
        || emergency_access.grantee_uuid != Some(initiating_user.uuid.clone())
        || emergency_access.grantee_uuid != Some(initiating_user.uuid)
    {
        err!("Emergency access not valid.")
    }

    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };
@@ -489,14 +468,14 @@ async fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbCo
    emergency_access.updated_at = now;
    emergency_access.recovery_initiated_at = Some(now);
    emergency_access.last_notification_at = Some(now);
    emergency_access.save(&conn).await?;
    emergency_access.save(&mut conn).await?;

    if CONFIG.mail_enabled() {
        mail::send_emergency_access_recovery_initiated(
            &grantor_user.email,
            &initiating_user.name,
            emergency_access.get_type_as_str(),
            &emergency_access.wait_time_days.clone().to_string(),
            &emergency_access.wait_time_days,
        )
        .await?;
    }
@@ -504,34 +483,33 @@ async fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbCo
}

#[post("/emergency-access/<emer_id>/approve")]
async fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let approving_user = headers.user;
    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
        || emergency_access.grantor_uuid != approving_user.uuid
        || emergency_access.grantor_uuid != headers.user.uuid
    {
        err!("Emergency access not valid.")
    }

    let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
        let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
            Some(user) => user,
            None => err!("Grantee user not found."),
        };

        emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
        emergency_access.save(&conn).await?;
        emergency_access.save(&mut conn).await?;

        if CONFIG.mail_enabled() {
            mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name).await?;
@@ -543,35 +521,34 @@ async fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbCon
}

#[post("/emergency-access/<emer_id>/reject")]
async fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let rejecting_user = headers.user;
    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
        || emergency_access.grantor_uuid != rejecting_user.uuid
        || emergency_access.grantor_uuid != headers.user.uuid
    {
        err!("Emergency access not valid.")
    }

    let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
        let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
            Some(user) => user,
            None => err!("Grantee user not found."),
        };

        emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
        emergency_access.save(&conn).await?;
        emergency_access.save(&mut conn).await?;

        if CONFIG.mail_enabled() {
            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
@@ -587,31 +564,34 @@ async fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn
// region action

#[post("/emergency-access/<emer_id>/view")]
async fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let requesting_user = headers.user;
    let host = headers.host;
    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::View) {
    if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
        err!("Emergency access not valid.")
    }

    let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await;
    let cipher_sync_data =
        CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, CipherSyncType::User, &conn).await;
    let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await;
    let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, CipherSyncType::User, &mut conn).await;

    let ciphers_json = stream::iter(ciphers)
        .then(|c| async {
            let c = c; // Move out this single variable
            c.to_json(&host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &conn).await
        })
        .collect::<Vec<Value>>()
        .await;
    let mut ciphers_json = Vec::with_capacity(ciphers.len());
    for c in ciphers {
        ciphers_json.push(
            c.to_json(
                &headers.host,
                &emergency_access.grantor_uuid,
                Some(&cipher_sync_data),
                CipherSyncType::User,
                &mut conn,
            )
            .await,
        );
    }

    Ok(Json(json!({
        "Ciphers": ciphers_json,
@@ -621,33 +601,37 @@ async fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn)
}

#[post("/emergency-access/<emer_id>/takeover")]
async fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
    check_emergency_access_allowed()?;

    let requesting_user = headers.user;
    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
    if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
        err!("Emergency access not valid.")
    }

    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    Ok(Json(json!({
        "Kdf": grantor_user.client_kdf_type,
        "KdfIterations": grantor_user.client_kdf_iter,
        "KeyEncrypted": &emergency_access.key_encrypted,
        "Object": "emergencyAccessTakeover",
    })))
    let result = json!({
        "Kdf": grantor_user.client_kdf_type,
        "KdfIterations": grantor_user.client_kdf_iter,
        "KdfMemory": grantor_user.client_kdf_memory,
        "KdfParallelism": grantor_user.client_kdf_parallelism,
        "KeyEncrypted": &emergency_access.key_encrypted,
        "Object": "emergencyAccessTakeover",
    });

    Ok(Json(result))
}

#[derive(Deserialize, Debug)]
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessPasswordData {
    NewMasterPasswordHash: String,
@@ -656,44 +640,43 @@ struct EmergencyAccessPasswordData {

#[post("/emergency-access/<emer_id>/password", data = "<data>")]
async fn password_emergency_access(
    emer_id: String,
    emer_id: &str,
    data: JsonUpcase<EmergencyAccessPasswordData>,
    headers: Headers,
    conn: DbConn,
    mut conn: DbConn,
) -> EmptyResult {
    check_emergency_access_allowed()?;

    let data: EmergencyAccessPasswordData = data.into_inner().data;
    let new_master_password_hash = &data.NewMasterPasswordHash;
    let key = data.Key;
    //let key = &data.Key;

    let requesting_user = headers.user;
    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
    if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
        err!("Emergency access not valid.")
    }

    let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
    let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    // change the grantor_user password
    grantor_user.set_password(new_master_password_hash, None);
    grantor_user.akey = key;
    grantor_user.save(&conn).await?;
    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
    grantor_user.save(&mut conn).await?;

    // Disable TwoFactor providers since they will otherwise block logins
    TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn).await?;
    TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?;

    // Remove the grantor from all organisations unless they are the Owner
    for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn).await {
    for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
        if user_org.atype != UserOrgType::Owner as i32 {
            user_org.delete(&conn).await?;
            user_org.delete(&mut conn).await?;
        }
    }
    Ok(())
@@ -702,23 +685,23 @@ async fn password_emergency_access(

// endregion

#[get("/emergency-access/<emer_id>/policies")]
async fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
    let requesting_user = headers.user;
    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
        Some(emer) => emer,
        None => err!("Emergency access not valid."),
    };

    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
    if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
        err!("Emergency access not valid.")
    }

    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
        Some(user) => user,
        None => err!("Grantor user not found."),
    };

    let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn);
    let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
    let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();

    Ok(Json(json!({
@@ -730,10 +713,11 @@ async fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbCo

fn is_valid_request(
    emergency_access: &EmergencyAccess,
    requesting_user_uuid: String,
    requesting_user_uuid: &str,
    requested_access_type: EmergencyAccessType,
) -> bool {
    emergency_access.grantee_uuid == Some(requesting_user_uuid)
    emergency_access.grantee_uuid.is_some()
        && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid
        && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
        && emergency_access.atype == requested_access_type as i32
}
|
||||
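With the parameter now a `&str`, the `Option<String>` field can no longer be compared via `== Some(requesting_user_uuid)`, hence the `is_some()` plus `unwrap()` pair above. For reference, an equivalent panic-free formulation uses `Option::as_deref`; this is an alternative idiom, not what the patch does:

    // `as_deref` turns &Option<String> into Option<&str>, allowing a direct comparison.
    fn grantee_matches(grantee_uuid: &Option<String>, requesting_user_uuid: &str) -> bool {
        grantee_uuid.as_deref() == Some(requesting_user_uuid)
    }

    fn main() {
        let stored = Some(String::from("1234-abcd"));
        assert!(grantee_matches(&stored, "1234-abcd"));
        assert!(!grantee_matches(&None, "1234-abcd"));
    }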
@@ -751,41 +735,45 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
         return;
     }

-    if let Ok(conn) = pool.get().await {
-        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await;
+    if let Ok(mut conn) = pool.get().await {
+        let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await;

         if emergency_access_list.is_empty() {
             debug!("No emergency request timeout to approve");
         }

+        let now = Utc::now().naive_utc();
         for mut emer in emergency_access_list {
-            if emer.recovery_initiated_at.is_some()
-                && Utc::now().naive_utc()
-                    >= emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days))
-            {
-                emer.status = EmergencyAccessStatus::RecoveryApproved as i32;
-                emer.save(&conn).await.expect("Cannot save emergency access on job");
+            // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
+            let recovery_allowed_at =
+                emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+            if recovery_allowed_at.le(&now) {
+                // Only update the access status
+                // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
+                emer.update_access_status_and_save(EmergencyAccessStatus::RecoveryApproved as i32, &now, &mut conn)
+                    .await
+                    .expect("Unable to update emergency access status");

                 if CONFIG.mail_enabled() {
                     // get grantor user to send Accepted email
                     let grantor_user =
-                        User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found.");
+                        User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found");

                     // get grantee user to send Accepted email
                     let grantee_user =
-                        User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
+                        User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn)
                             .await
-                            .expect("Grantee user not found.");
+                            .expect("Grantee user not found");

                     mail::send_emergency_access_recovery_timed_out(
                         &grantor_user.email,
-                        &grantee_user.name.clone(),
+                        &grantee_user.name,
                         emer.get_type_as_str(),
                     )
                     .await
                     .expect("Error on sending email");

-                    mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name.clone())
+                    mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)
                         .await
                         .expect("Error on sending email");
                 }
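The rewrite hoists `now` out of the loop and precomputes the cutoff instead of re-evaluating `Utc::now()` per iteration. A self-contained sketch of the same chrono arithmetic, with made-up timestamps:

    // Recovery becomes allowed once recovery_initiated_at + wait_time_days <= now.
    use chrono::{Duration, NaiveDate};

    fn main() {
        let recovery_initiated_at =
            NaiveDate::from_ymd_opt(2023, 1, 1).unwrap().and_hms_opt(12, 0, 0).unwrap();
        let wait_time_days: i32 = 7;

        let recovery_allowed_at = recovery_initiated_at + Duration::days(i64::from(wait_time_days));
        let now = NaiveDate::from_ymd_opt(2023, 1, 9).unwrap().and_hms_opt(0, 0, 0).unwrap();

        // `le(&now)` is just `<=`: the seven-day grace period has elapsed here.
        assert!(recovery_allowed_at.le(&now));
    }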
@@ -802,39 +790,48 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
         return;
     }

-    if let Ok(conn) = pool.get().await {
-        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await;
+    if let Ok(mut conn) = pool.get().await {
+        let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await;

         if emergency_access_list.is_empty() {
             debug!("No emergency request reminder notification to send");
         }

+        let now = Utc::now().naive_utc();
         for mut emer in emergency_access_list {
-            if (emer.recovery_initiated_at.is_some()
-                && Utc::now().naive_utc()
-                    >= emer.recovery_initiated_at.unwrap() + Duration::days((i64::from(emer.wait_time_days)) - 1))
-                && (emer.last_notification_at.is_none()
-                    || (emer.last_notification_at.is_some()
-                        && Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1)))
-            {
-                emer.save(&conn).await.expect("Cannot save emergency access on job");
+            // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
+            // Calculate the day before the recovery will become active
+            let final_recovery_reminder_at =
+                emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+            // Calculate if a day has passed since the previous notification, else no notification has been sent before
+            let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
+                last_notification_at + Duration::days(1)
+            } else {
+                now
+            };
+            if final_recovery_reminder_at.le(&now) && next_recovery_reminder_at.le(&now) {
+                // Only update the last notification date
+                // Updating the whole record could cause issues when the emergency_request_timeout_job is also active
+                emer.update_last_notification_date_and_save(&now, &mut conn)
+                    .await
+                    .expect("Unable to update emergency access notification date");

                 if CONFIG.mail_enabled() {
                     // get grantor user to send Accepted email
                     let grantor_user =
-                        User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found.");
+                        User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found");

                     // get grantee user to send Accepted email
                     let grantee_user =
-                        User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
+                        User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn)
                             .await
-                            .expect("Grantee user not found.");
+                            .expect("Grantee user not found");

                     mail::send_emergency_access_recovery_reminder(
                         &grantor_user.email,
-                        &grantee_user.name.clone(),
+                        &grantee_user.name,
                         emer.get_type_as_str(),
-                        &emer.wait_time_days.to_string(), // TODO(jjlin): This should be the number of days left.
+                        "1", // This notification is only triggered one day before the activation
                     )
                     .await
                     .expect("Error on sending email");
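The reminder gate combines two thresholds: the day before the recovery activates (`wait_time_days - 1`) and a once-per-day throttle derived from `last_notification_at`. A small standalone sketch of the throttle, with invented timestamps:

    // If a reminder was already sent, the next one is due 24h later;
    // if none was ever sent, one is due immediately.
    use chrono::{Duration, NaiveDate, NaiveDateTime};

    fn next_reminder_at(last: Option<NaiveDateTime>, now: NaiveDateTime) -> NaiveDateTime {
        match last {
            Some(last) => last + Duration::days(1),
            None => now,
        }
    }

    fn main() {
        let now = NaiveDate::from_ymd_opt(2023, 1, 8).unwrap().and_hms_opt(9, 0, 0).unwrap();
        let sent = NaiveDate::from_ymd_opt(2023, 1, 7).unwrap().and_hms_opt(8, 0, 0).unwrap();
        assert!(next_reminder_at(Some(sent), now).le(&now)); // sent yesterday: due again
        assert!(next_reminder_at(None, now).le(&now));       // never sent: due now
    }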
336  src/api/core/events.rs  Normal file
@@ -0,0 +1,336 @@
use std::net::IpAddr;

use chrono::NaiveDateTime;
use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcaseVec},
    auth::{AdminHeaders, Headers},
    db::{
        models::{Cipher, Event, UserOrganization},
        DbConn, DbPool,
    },
    util::parse_date,
    CONFIG,
};

/// ###############################################################################################################
/// /api routes
pub fn routes() -> Vec<Route> {
    routes![get_org_events, get_cipher_events, get_user_events,]
}

#[derive(FromForm)]
#[allow(non_snake_case)]
struct EventRange {
    start: String,
    end: String,
    #[field(name = "continuationToken")]
    continuation_token: Option<String>,
}

// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
#[get("/organizations/<org_id>/events?<data..>")]
async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
    // Return an empty vec when org events are disabled.
    // This prevents client errors
    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
        Vec::with_capacity(0)
    } else {
        let start_date = parse_date(&data.start);
        let end_date = if let Some(before_date) = &data.continuation_token {
            parse_date(before_date)
        } else {
            parse_date(&data.end)
        };

        Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn)
            .await
            .iter()
            .map(|e| e.to_json())
            .collect()
    };

    Ok(Json(json!({
        "Data": events_json,
        "Object": "list",
        "ContinuationToken": get_continuation_token(&events_json),
    })))
}

#[get("/ciphers/<cipher_id>/events?<data..>")]
async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
    // Return an empty vec when org events are disabled.
    // This prevents client errors
    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
        Vec::with_capacity(0)
    } else {
        let mut events_json = Vec::with_capacity(0);
        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await {
            let start_date = parse_date(&data.start);
            let end_date = if let Some(before_date) = &data.continuation_token {
                parse_date(before_date)
            } else {
                parse_date(&data.end)
            };

            events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn)
                .await
                .iter()
                .map(|e| e.to_json())
                .collect()
        }
        events_json
    };

    Ok(Json(json!({
        "Data": events_json,
        "Object": "list",
        "ContinuationToken": get_continuation_token(&events_json),
    })))
}

#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
async fn get_user_events(
    org_id: &str,
    user_org_id: &str,
    data: EventRange,
    _headers: AdminHeaders,
    mut conn: DbConn,
) -> JsonResult {
    // Return an empty vec when org events are disabled.
    // This prevents client errors
    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
        Vec::with_capacity(0)
    } else {
        let start_date = parse_date(&data.start);
        let end_date = if let Some(before_date) = &data.continuation_token {
            parse_date(before_date)
        } else {
            parse_date(&data.end)
        };

        Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn)
            .await
            .iter()
            .map(|e| e.to_json())
            .collect()
    };

    Ok(Json(json!({
        "Data": events_json,
        "Object": "list",
        "ContinuationToken": get_continuation_token(&events_json),
    })))
}

fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
    // When the length of the vec equals the max page_size there probably is more data
    // When it is less, then all events are loaded.
    if events_json.len() as i64 == Event::PAGE_SIZE {
        if let Some(last_event) = events_json.last() {
            last_event["date"].as_str()
        } else {
            None
        }
    } else {
        None
    }
}

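The continuation token is just the `date` of the last event on a full page: the client echoes it back as `continuationToken`, which the handlers above substitute for `end` on the next request. A self-contained sketch of that paging contract over an in-memory, newest-first list (names and page size invented for illustration):

    const PAGE_SIZE: usize = 3; // stand-in for Event::PAGE_SIZE

    // Keep entries strictly older than `end`, newest first, one page at a time.
    fn fetch_page<'a>(events: &'a [&'a str], end: &str) -> Vec<&'a str> {
        events.iter().copied().filter(|d| *d < end).take(PAGE_SIZE).collect()
    }

    // A full page means there is probably more data: hand back its oldest date.
    fn continuation_token<'a>(page: &[&'a str]) -> Option<&'a str> {
        if page.len() == PAGE_SIZE {
            page.last().copied()
        } else {
            None
        }
    }

    fn main() {
        let events = ["2023-01-05", "2023-01-04", "2023-01-03", "2023-01-02", "2023-01-01"];
        let mut end = String::from("9999-12-31");
        loop {
            let page = fetch_page(&events, &end);
            println!("page: {page:?}");
            match continuation_token(&page) {
                Some(token) => end = token.to_string(),
                None => break, // short page: everything is loaded
            }
        }
    }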
/// ###############################################################################################################
/// /events routes
pub fn main_routes() -> Vec<Route> {
    routes![post_events_collect,]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EventCollection {
    // Mandatory
    Type: i32,
    Date: String,

    // Optional
    CipherId: Option<String>,
    OrganizationId: Option<String>,
}

// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
#[post("/collect", format = "application/json", data = "<data>")]
async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.org_events_enabled() {
        return Ok(());
    }

    for event in data.iter().map(|d| &d.data) {
        let event_date = parse_date(&event.Date);
        match event.Type {
            1000..=1099 => {
                _log_user_event(
                    event.Type,
                    &headers.user.uuid,
                    headers.device.atype,
                    Some(event_date),
                    &headers.ip.ip,
                    &mut conn,
                )
                .await;
            }
            1600..=1699 => {
                if let Some(org_uuid) = &event.OrganizationId {
                    _log_event(
                        event.Type,
                        org_uuid,
                        org_uuid,
                        &headers.user.uuid,
                        headers.device.atype,
                        Some(event_date),
                        &headers.ip.ip,
                        &mut conn,
                    )
                    .await;
                }
            }
            _ => {
                if let Some(cipher_uuid) = &event.CipherId {
                    if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
                        if let Some(org_uuid) = cipher.organization_uuid {
                            _log_event(
                                event.Type,
                                cipher_uuid,
                                &org_uuid,
                                &headers.user.uuid,
                                headers.device.atype,
                                Some(event_date),
                                &headers.ip.ip,
                                &mut conn,
                            )
                            .await;
                        }
                    }
                }
            }
        }
    }
    Ok(())
}

pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
}

async fn _log_user_event(
    event_type: i32,
    user_uuid: &str,
    device_type: i32,
    event_date: Option<NaiveDateTime>,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
    let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org

    // Upstream saves the event also without any org_uuid.
    let mut event = Event::new(event_type, event_date);
    event.user_uuid = Some(String::from(user_uuid));
    event.act_user_uuid = Some(String::from(user_uuid));
    event.device_type = Some(device_type);
    event.ip_address = Some(ip.to_string());
    events.push(event);

    // For each org a user is a member of store these events per org
    for org_uuid in orgs {
        let mut event = Event::new(event_type, event_date);
        event.user_uuid = Some(String::from(user_uuid));
        event.org_uuid = Some(org_uuid);
        event.act_user_uuid = Some(String::from(user_uuid));
        event.device_type = Some(device_type);
        event.ip_address = Some(ip.to_string());
        events.push(event);
    }

    Event::save_user_event(events, conn).await.unwrap_or(());
}

pub async fn log_event(
    event_type: i32,
    source_uuid: &str,
    org_uuid: &str,
    act_user_uuid: String,
    device_type: i32,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
}

#[allow(clippy::too_many_arguments)]
async fn _log_event(
    event_type: i32,
    source_uuid: &str,
    org_uuid: &str,
    act_user_uuid: &str,
    device_type: i32,
    event_date: Option<NaiveDateTime>,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    // Create a new empty event
    let mut event = Event::new(event_type, event_date);
    match event_type {
        // 1000..=1099 Are user events, they need to be logged via log_user_event()
        // Cipher Events
        1100..=1199 => {
            event.cipher_uuid = Some(String::from(source_uuid));
        }
        // Collection Events
        1300..=1399 => {
            event.collection_uuid = Some(String::from(source_uuid));
        }
        // Group Events
        1400..=1499 => {
            event.group_uuid = Some(String::from(source_uuid));
        }
        // Org User Events
        1500..=1599 => {
            event.org_user_uuid = Some(String::from(source_uuid));
        }
        // 1600..=1699 Are organizational events, and they do not need the source_uuid
        // Policy Events
        1700..=1799 => {
            event.policy_uuid = Some(String::from(source_uuid));
        }
        // Ignore others
        _ => {}
    }

    event.org_uuid = Some(String::from(org_uuid));
    event.act_user_uuid = Some(String::from(act_user_uuid));
    event.device_type = Some(device_type);
    event.ip_address = Some(ip.to_string());
    event.save(conn).await.unwrap_or(());
}

pub async fn event_cleanup_job(pool: DbPool) {
    debug!("Start events cleanup job");
    if CONFIG.events_days_retain().is_none() {
        debug!("events_days_retain is not configured, abort");
        return;
    }

    if let Ok(mut conn) = pool.get().await {
        Event::clean_events(&mut conn).await.ok();
    } else {
        error!("Failed to get DB connection while trying to cleanup the events table")
    }
}
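For reference, a request body the `/events/collect` route above would accept, built with `serde_json` so the shape is checked at compile time. Field names mirror the upcased `EventCollection` struct; the UUID is made up, and 1107 is simply a value in the `1100..=1199` cipher range that `post_events_collect` forwards to `_log_event`:

    use serde_json::json;

    fn main() {
        // Hypothetical payload: an array, since the route takes a JsonUpcaseVec.
        let body = json!([
            {
                "Type": 1107,
                "Date": "2023-01-01T12:00:00.000Z",
                "CipherId": "11111111-2222-3333-4444-555555555555",
                "OrganizationId": null
            }
        ]);
        println!("{}", serde_json::to_string_pretty(&body).unwrap());
    }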
@@ -12,8 +12,8 @@ pub fn routes() -> Vec<rocket::Route> {
 }

 #[get("/folders")]
-async fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
-    let folders = Folder::find_by_user(&headers.user.uuid, &conn).await;
+async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    let folders = Folder::find_by_user(&headers.user.uuid, &mut conn).await;
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
@@ -24,8 +24,8 @@ async fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
 }

 #[get("/folders/<uuid>")]
-async fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn).await {
+async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -44,20 +44,20 @@ pub struct FolderData {
 }

 #[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     let data: FolderData = data.into_inner().data;

     let mut folder = Folder::new(headers.user.uuid, data.Name);

-    folder.save(&conn).await?;
-    nt.send_folder_update(UpdateType::FolderCreate, &folder).await;
+    folder.save(&mut conn).await?;
+    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>", data = "<data>")]
 async fn post_folder(
-    uuid: String,
+    uuid: &str,
     data: JsonUpcase<FolderData>,
     headers: Headers,
     conn: DbConn,
@@ -68,15 +68,15 @@ async fn post_folder(

 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
-    uuid: String,
+    uuid: &str,
     data: JsonUpcase<FolderData>,
     headers: Headers,
-    conn: DbConn,
+    mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
     let data: FolderData = data.into_inner().data;

-    let mut folder = match Folder::find_by_uuid(&uuid, &conn).await {
+    let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -87,20 +87,20 @@ async fn put_folder(

     folder.name = data.Name;

-    folder.save(&conn).await?;
-    nt.send_folder_update(UpdateType::FolderUpdate, &folder).await;
+    folder.save(&mut conn).await?;
+    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>/delete")]
-async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
     delete_folder(uuid, headers, conn, nt).await
 }

 #[delete("/folders/<uuid>")]
-async fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn).await {
+async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -110,8 +110,8 @@ async fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify<
     }

     // Delete the actual folder entry
-    folder.delete(&conn).await?;
+    folder.delete(&mut conn).await?;

-    nt.send_folder_update(UpdateType::FolderDelete, &folder).await;
+    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device.uuid, &mut conn).await;
     Ok(())
 }
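A pattern that repeats across these hunks: handlers now bind the connection guard as `mut conn: DbConn` and pass `&mut conn` down, where the old code passed `&conn`. A minimal standalone illustration of the ownership mechanics, with no Rocket or database involved:

    struct DbConn; // stand-in for the real connection guard

    impl DbConn {
        // A &mut receiver models the new `&mut conn` call sites.
        fn query(&mut self) -> &'static str {
            "row"
        }
    }

    // The guard arrives by value; a `mut` binding lets the handler hand out
    // sequential &mut borrows, which never overlap and so always compile.
    fn handler(mut conn: DbConn) -> &'static str {
        let _first = conn.query();
        conn.query()
    }

    fn main() {
        println!("{}", handler(DbConn));
    }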
@@ -1,32 +1,34 @@
 pub mod accounts;
 mod ciphers;
 mod emergency_access;
+mod events;
 mod folders;
 mod organizations;
+mod public;
 mod sends;
 pub mod two_factor;

-pub use ciphers::purge_trashed_ciphers;
-pub use ciphers::{CipherSyncData, CipherSyncType};
+pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
+pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
 pub use two_factor::send_incomplete_2fa_notifications;

 pub fn routes() -> Vec<Route> {
     let mut device_token_routes = routes![clear_device_token, put_device_token];
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
     let mut hibp_routes = routes![hibp_breach];
-    let mut meta_routes = routes![alive, now, version];
+    let mut meta_routes = routes![alive, now, version, config];

     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
     routes.append(&mut ciphers::routes());
     routes.append(&mut emergency_access::routes());
+    routes.append(&mut events::routes());
     routes.append(&mut folders::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
     routes.append(&mut sends::routes());
     routes.append(&mut device_token_routes);
+    routes.append(&mut public::routes());
     routes.append(&mut eq_domains_routes);
     routes.append(&mut hibp_routes);
     routes.append(&mut meta_routes);
@@ -34,52 +36,27 @@ pub fn routes() -> Vec<Route> {
     routes
 }

+pub fn events_routes() -> Vec<Route> {
+    let mut routes = Vec::new();
+    routes.append(&mut events::main_routes());
+
+    routes
+}

 //
 // Move this somewhere else
 //
-use rocket::serde::json::Json;
-use rocket::Route;
+use rocket::{serde::json::Json, Catcher, Route};
 use serde_json::Value;

 use crate::{
-    api::{JsonResult, JsonUpcase},
+    api::{JsonResult, JsonUpcase, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
     util::get_reqwest_client,
 };

 #[put("/devices/identifier/<uuid>/clear-token")]
 fn clear_device_token(uuid: String) -> &'static str {
     // This endpoint doesn't have auth header

     let _ = uuid;
     // uuid is not related to deviceId

     // This only clears push token
     // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
     // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
     ""
 }

 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
 fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
     let _data: Value = data.into_inner().data;
     // Data has a single string value "PushToken"
     let _ = uuid;
     // uuid is not related to deviceId

     // TODO: This should save the push token, but we don't have push functionality

     Json(json!({
         "Id": headers.device.uuid,
         "Name": headers.device.name,
         "Type": headers.device.atype,
         "Identifier": headers.device.uuid,
         "CreationDate": crate::util::format_date(&headers.device.created_at),
     }))
 }

 #[derive(Serialize, Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct GlobalDomain {
@@ -127,7 +104,12 @@ struct EquivDomainData {
 }

 #[post("/settings/domains", data = "<data>")]
-async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn post_eq_domains(
+    data: JsonUpcase<EquivDomainData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;

     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -139,21 +121,27 @@ async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, co
     user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());

-    user.save(&conn).await?;
+    user.save(&mut conn).await?;
+
+    nt.send_user_update(UpdateType::SyncSettings, &user).await;

     Ok(Json(json!({})))
 }

 #[put("/settings/domains", data = "<data>")]
-async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_eq_domains(data, headers, conn).await
+async fn put_eq_domains(
+    data: JsonUpcase<EquivDomainData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    post_eq_domains(data, headers, conn, nt).await
 }

 #[get("/hibp/breach?<username>")]
-async fn hibp_breach(username: String) -> JsonResult {
+async fn hibp_breach(username: &str) -> JsonResult {
     let url = format!(
-        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
-        username
+        "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
     );

     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
@@ -175,7 +163,7 @@ async fn hibp_breach(username: String) -> JsonResult {
             "Domain": "haveibeenpwned.com",
             "BreachDate": "2019-08-18T00:00:00Z",
             "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
+            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
             "LogoPath": "vw_static/hibp.png",
             "PwnCount": 0,
             "DataClasses": [
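Both hibp_breach changes lean on inline format arguments (stable since Rust 1.58), which let `format!` capture identifiers from the surrounding scope instead of taking positional or named parameters:

    fn main() {
        let username = "demo@example.com";
        let positional = format!("breachedaccount/{}", username); // old style
        let named = format!("breachedaccount/{account}", account = username);
        let inline = format!("breachedaccount/{username}");       // new style
        assert_eq!(positional, inline);
        assert_eq!(named, inline);
    }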
@@ -200,3 +188,39 @@ pub fn now() -> Json<String> {
 fn version() -> Json<&'static str> {
     Json(crate::VERSION.unwrap_or_default())
 }
+
+#[get("/config")]
+fn config() -> Json<Value> {
+    let domain = crate::CONFIG.domain();
+    Json(json!({
+        "version": crate::VERSION,
+        "gitHash": option_env!("GIT_REV"),
+        "server": {
+            "name": "Vaultwarden",
+            "url": "https://github.com/dani-garcia/vaultwarden"
+        },
+        "environment": {
+            "vault": domain,
+            "api": format!("{domain}/api"),
+            "identity": format!("{domain}/identity"),
+            "notifications": format!("{domain}/notifications"),
+            "sso": "",
+        },
+        "object": "config",
+    }))
+}
+
+pub fn catchers() -> Vec<Catcher> {
+    catchers![api_not_found]
+}
+
+#[catch(404)]
+fn api_not_found() -> Json<Value> {
+    Json(json!({
+        "error": {
+            "code": 404,
+            "reason": "Not Found",
+            "description": "The requested resource could not be found."
+        }
+    }))
+}
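One detail of the new `/config` endpoint: `option_env!` is expanded at compile time, so the `gitHash` field reports whatever GIT_REV was set to when the binary was built, or null if it was absent; it is not read at runtime. A tiny demonstration:

    fn main() {
        // Some("...") if GIT_REV was set for rustc at build time, else None.
        let git_rev: Option<&'static str> = option_env!("GIT_REV");
        println!("gitHash: {git_rev:?}");
    }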
238  src/api/core/public.rs  Normal file
@@ -0,0 +1,238 @@
use chrono::Utc;
use rocket::{
    request::{self, FromRequest, Outcome},
    Request, Route,
};

use std::collections::HashSet;

use crate::{
    api::{EmptyResult, JsonUpcase},
    auth,
    db::{models::*, DbConn},
    mail, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![ldap_import]
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct OrgImportGroupData {
    Name: String,
    ExternalId: String,
    MemberExternalIds: Vec<String>,
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct OrgImportUserData {
    Email: String,
    ExternalId: String,
    Deleted: bool,
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct OrgImportData {
    Groups: Vec<OrgImportGroupData>,
    Members: Vec<OrgImportUserData>,
    OverwriteExisting: bool,
    // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
}

#[post("/public/organization/import", data = "<data>")]
async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
    // Most of the logic for this function can be found here
    // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797

    let org_id = token.0;
    let data = data.into_inner().data;

    for user_data in &data.Members {
        if user_data.Deleted {
            // If user is marked for deletion and it exists, revoke it
            if let Some(mut user_org) =
                UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
            {
                user_org.revoke();
                user_org.save(&mut conn).await?;
            }

            // If user is part of the organization, restore it
        } else if let Some(mut user_org) =
            UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
        {
            if user_org.status < UserOrgStatus::Revoked as i32 {
                user_org.restore();
                user_org.save(&mut conn).await?;
            }
        } else {
            // If user is not part of the organization
            let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
                Some(user) => user, // exists in vaultwarden
                None => {
                    // doesn't exist in vaultwarden
                    let mut new_user = User::new(user_data.Email.clone());
                    new_user.set_external_id(Some(user_data.ExternalId.clone()));
                    new_user.save(&mut conn).await?;

                    if !CONFIG.mail_enabled() {
                        let invitation = Invitation::new(&new_user.email);
                        invitation.save(&mut conn).await?;
                    }
                    new_user
                }
            };
            let user_org_status = if CONFIG.mail_enabled() {
                UserOrgStatus::Invited as i32
            } else {
                UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
            };

            let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
            new_org_user.access_all = false;
            new_org_user.atype = UserOrgType::User as i32;
            new_org_user.status = user_org_status;

            new_org_user.save(&mut conn).await?;

            if CONFIG.mail_enabled() {
                let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
                    Some(org) => (org.name, org.billing_email),
                    None => err!("Error looking up organization"),
                };

                mail::send_invite(
                    &user_data.Email,
                    &user.uuid,
                    Some(org_id.clone()),
                    Some(new_org_user.uuid),
                    &org_name,
                    Some(org_email),
                )
                .await?;
            }
        }
    }

    if CONFIG.org_groups_enabled() {
        for group_data in &data.Groups {
            let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
                Some(group) => group.uuid,
                None => {
                    let mut group =
                        Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
                    group.save(&mut conn).await?;
                    group.uuid
                }
            };

            GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

            for ext_id in &group_data.MemberExternalIds {
                if let Some(user) = User::find_by_external_id(ext_id, &mut conn).await {
                    if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await
                    {
                        let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
                        group_user.save(&mut conn).await?;
                    }
                }
            }
        }
    } else {
        warn!("Group support is disabled, groups will not be imported!");
    }

    // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
    if data.OverwriteExisting {
        // Generate a HashSet to quickly verify if a member is listed or not.
        let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
        for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
            if let Some(user_external_id) =
                User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.external_id)
            {
                if user_external_id.is_some() && !sync_members.contains(&user_external_id.unwrap()) {
                    if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
                        // Removing owner, check that there is at least one other confirmed owner
                        if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
                            .await
                            <= 1
                        {
                            warn!("Can't delete the last owner");
                            continue;
                        }
                    }
                    user_org.delete(&mut conn).await?;
                }
            }
        }
    }

    Ok(())
}

pub struct PublicToken(String);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for PublicToken {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
        let headers = request.headers();
        // Get access_token
        let access_token: &str = match headers.get_one("Authorization") {
            Some(a) => match a.rsplit("Bearer ").next() {
                Some(split) => split,
                None => err_handler!("No access token provided"),
            },
            None => err_handler!("No access token provided"),
        };
        // Check JWT token is valid and get device and user from it
        let claims = match auth::decode_api_org(access_token) {
            Ok(claims) => claims,
            Err(_) => err_handler!("Invalid claim"),
        };
        // Check if time is between claims.nbf and claims.exp
        let time_now = Utc::now().naive_utc().timestamp();
        if time_now < claims.nbf {
            err_handler!("Token issued in the future");
        }
        if time_now > claims.exp {
            err_handler!("Token expired");
        }
        // Check if claims.iss is host|claims.scope[0]
        let host = match auth::Host::from_request(request).await {
            Outcome::Success(host) => host,
            _ => err_handler!("Error getting Host"),
        };
        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
        if complete_host != claims.iss {
            err_handler!("Token not issued by this server");
        }

        // Check if claims.sub is org_api_key.uuid
        // Check if claims.client_sub is org_api_key.org_uuid
        let conn = match DbConn::from_request(request).await {
            Outcome::Success(conn) => conn,
            _ => err_handler!("Error getting DB"),
        };
        let org_uuid = match claims.client_id.strip_prefix("organization.") {
            Some(uuid) => uuid,
            None => err_handler!("Malformed client_id"),
        };
        let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await {
            Some(org_api_key) => org_api_key,
            None => err_handler!("Invalid client_id"),
        };
        if org_api_key.org_uuid != claims.client_sub {
            err_handler!("Token not issued for this org");
        }
        if org_api_key.uuid != claims.sub {
            err_handler!("Token not issued for this client");
        }

        Outcome::Success(PublicToken(claims.client_sub))
    }
}
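One subtlety in the request guard above: `rsplit("Bearer ")` walks segments right to left, so `.next()` yields the substring after the last "Bearer " occurrence, and the whole header when the prefix is absent. A standalone sketch of that extraction:

    fn extract_token(authorization: &str) -> Option<&str> {
        // rsplit always yields at least one segment, so this never returns None;
        // the matching arm in the guard is defensive.
        authorization.rsplit("Bearer ").next()
    }

    fn main() {
        assert_eq!(extract_token("Bearer abc.def.ghi"), Some("abc.def.ghi"));
        assert_eq!(extract_token("abc.def.ghi"), Some("abc.def.ghi")); // no scheme prefix
    }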