Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2025-09-10 18:55:57 +03:00
Compare commits
357 Commits
0c1f0bad17, 72cf59fa54, 527bc1f625, 2168d09421, 1c266031d7, b636d20c64, 2a9ca88c2a,
b9c434addb, 451ad47327, 7f61dd5fe3, 3ca85028ea, 542a73cc6e, 78d07e2fda, b617ffd2af,
3abf173d89, df8aeb10e8, 26ad06df7c, 37fff3ef4a, 28c5e63bf5, a07c213b3e, ed72741f48,
fb0c23b71f, d98f95f536, 6643e83b61, 7b742009a1, 649e2b48f3, 81f0c2b0e8, 80d8aa7239,
27d4b713f6, b0faaf2527, 8d06d9c111, c4d565b15b, 06f8e69c70, 7db52374cd, 843f205f6f,
2ff51ae77e, 2b75d81a8b, cad0dcbed1, 19b8388950, 87e08b9e50, 0b7d6bf6df, 89fe05b6cc,
d73d74e78f, 9a682b7a45, 94201ca133, 99f9e7252a, 42136a7097, 9bb4c38bf9, 5f01db69ff,
c59a7f4a8c, 8295688bed, 9713a3a555, d781981bbd, 5125fdb882, fd9693b961, f38926d666,
775d07e9a0, 2d5f172e77, 08f0de7b46, 45122bed9e, 0876d4a5fd, 7d552dbdc8, 9a60eb04c2,
1b99da91fb, a64a400c9c, 85c0aa1619, 19e78e3509, bf6330374c, e639d9063b, 4a88e7ec78,
65dad5a9d1, ba9ad14fbb, 62c7a4d491, 14e3dcad8e, f4a9645b54, 8f7900759f, 69ee4a70b4,
e4e16ed50f, a16c656770, 76b7de15de, 8ba6e61fd5, a30a1c9703, 76687e2df7, bf5aefd129,
1fa178d1d3, b7eedbcddc, 920371929b, 6ddbe84bde, 690d0ed1bb, 248e7dabc2, 4584cfe3c1,
cc646b1519, e501dc6d0e, 85ac9783f0, 5b430f22bc, d4eb21c2d9, 6bf8a9d93d, 605419ae1b,
b89ffb2731, bda123b1eb, 2c94ea075c, 4bd8eae07e, 5529264c3f, 0a5df06e77, 2f9ac61a4e,
840cf8740a, d8869adf52, a631fc0077, 9e4d372213, d0bf0ab237, e327583aa5, ead2f02cbd,
c453528dc1, 6ae48aa8c2, 88643fd9d5, 73e0002219, c49ee47de0, 14408396bb, 6cbb724069,
a2316ca091, c476e19796, 9f393cfd9d, 450c4d4d97, 75e62abed0, 97f9eb1320, 53cc8a65af,
f94ac6ca61, cee3fd5ba2, 016fe2269e, 03c0a5e405, cbbed79036, 4af81ec50e, a5ba67fef2,
4cebe1fff4, a984dbbdf3, 881524bd54, 44da9e6ca7, 4c0c8f7432, f67854c59c, a1c1b9ab3b,
395979e834, fce6cb5865, 338756550a, d014eede9a, 9930a0d752, 9928a5404b, a6e0ddcdf1,
acab70ed89, c0d149060f, 344f00d9c9, b26afb970a, 34ed5ce4b3, 9375d5b8c2, e3678b4b56,
b4c95fb4ac, 0bb33e04bb, 4d33e24099, 2cdce04662, 756d108f6a, ca20b3d80c, 4ab9362971,
4e8828e41a, f8d1cfad2a, b0a411b733, 81741647f3, f36bd72a7f, 8c10de3edd, 0ab10a7c43,
a1a5e00ff5, 8af4b593fa, 9bef2c120c, f7d99c43b5, ca0fd7a31b, 9e1550af8e, a99c9715f6,
1a888b5355, 10d5c7738a, 80f23e6d78, d5ed2ce6df, 5e649f0d0d, 612c0e9478, 0d2b3bfb99,
c934838ace, 4350e9d241, 0cdc0cb147, 20535065d7, a23f4a704b, 93f2f74767, 37ca202247,
37525b1e7e, d594b5a266, 41add45e67, 08b168a0a1, 978ef2bc8b, 881d1f4334, 56b4f46d7d,
f6bd8b3462, 1f0f64d961, 42ba817a4c, dd98fe860b, 1fe9f101be, c68fbb41d2, 91e80657e4,
2db30f918e, cfceac3909, 58b046fd10, 227779256c, 89b5f7c98d, c666497130, 2620a1ac8c,
ffdcafa044, 56ffec40f4, 96c2416903, 340d42a1ca, e19420160f, 1741316f42, 4f08167d6f,
fef76e2f6f, f16d56cb27, 120b286f2b, 7f437b6947, 8d6e62e18b, d0ec410b73, c546a59c38,
e5ec245626, 6ea95d1ede, 88bea44dd8, 8ee5d51bd4, c640abbcd7, 13598c098f, a622b4d2fb,
403f35b571, 3968bc8016, ff66368cb6, 3fb419e704, 832f838ddd, 18703bf195, ff8e88a5df,
72e1946ce5, ee391720aa, e3a2dfffab, 8bf1278b1b, 00ce943ea5, b67eacdfde, 0dcea75764,
0c5532d8b5, 46e0f3c43a, 2cd17fe7af, f44b2611e6, 82fee0ede3, 49579e4ce7, 9254cf9d9c,
ff0fee3690, 0778bd4bd5, 0cd065d354, 8615736e84, 5772836be5, c380d9c379, cea7a30d82,
06cde29419, 20f5988174, b491cfe0b0, fc513413ea, 3f7e4712cd, c2ef331df9, 5fef7983f4,
29ed82a359, 7d5186e40a, 99270612ba, c7b5b6ee07, 848d17ffb9, 47e8aa29e1, f270f2ed65,
aba5b234af, 9133e2927d, 38104ba7cf, c42bcae224, 764e51bbe9, 8e6c6a1dc4, 7a9cfc45da,
9e24b9065c, 1c2b376ca2, 746ce2afb4, 029008bad5, d3449bfa00, a9a5706764, 3ff8014add,
e60bdc7efe, cccd8262fa, 68e5d95d25, 5f458b288a, e9ee8ac2fa, 8a4dfc3bbe, 4f86517501,
7cb19ef767, 565439a914, b8010be26b, f76b8a32ca, 39167d333a, 0d63132987, 7b5d5d1302,
0dc98bda23, f9a062cac8, 6ad4ccd901, ee6ceaa923, 20b393d354, f707f86c8e, daea54b288,
1e5306b820, 6890c25ea1, 48482fece0, 1dc1d4df72, 2b4dd6f137, cc021a4784, e3c4609c2a,
3da44a8d30, 34ea10475d, 89a68741d6, 2421d49d9a, ced7f1771a, af2235bf88, 305de2e2cd,
8756c5c255, 27609ac4cc, 95d906bdbb, 4bb0d7bc05, d9599155ae, 1db37bf3d0, d75a80bd2d,
244bad3a24, f7056bcaa5, 994669fb69, 3ab90259f2, 155109dea1, b268c3dd1c, 4e64dbdde4,
a2955daffe, d3921b973b, cf6ad3cb15, 90e0b7fec6, d77333576b, 73ff8d79f7, 95fc88ae5b,
1d0eaac260, 3565bfc939, a82c04910f, 233f03ca2b, 93c881a9a8 := 93c881a7a9, 0af3956abd, 15feff3e79,
5c5700caa7, 3bddc176d6, 9caf4bf383, 9b2234fa0e, 1f79fdec4e, a56f4c97e4, 3a3390963c
@@ -3,11 +3,18 @@ target

 # Data folder
 data

+# Misc
 .env
+.env.template
+.gitattributes
+.gitignore
+rustfmt.toml

 # IDE files
 .vscode
 .idea
+.editorconfig
 *.iml

 # Documentation
@@ -17,9 +24,17 @@ data
 *.yml
 *.yaml

-# Docker folders
+# Docker
 hooks
 tools
+Dockerfile
+.dockerignore
+docker/**
+!docker/healthcheck.sh
+!docker/start.sh

 # Web vault
 web-vault
+
+# Vaultwarden Resources
+resources
23 .editorconfig Normal file
@@ -0,0 +1,23 @@
+# EditorConfig is awesome: https://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+[*]
+end_of_line = lf
+charset = utf-8
+
+[*.{rs,py}]
+indent_style = space
+indent_size = 4
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[Makefile]
+indent_style = tab
146 .env.template
@@ -1,8 +1,13 @@
-## Bitwarden_RS Configuration File
+## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
 ##
 ## Be aware that most of these settings will be overridden if they were changed
 ## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
+##
+## By default, vaultwarden expects for this file to be named ".env" and located
+## in the current working directory. If this is not the case, the environment
+## variable ENV_FILE can be set to the location of this file prior to starting
+## vaultwarden.

 ## Main data folder
 # DATA_FOLDER=data
@@ -24,6 +29,15 @@
 ## Define the size of the connection pool used for connecting to the database.
 # DATABASE_MAX_CONNS=10

+## Database connection initialization
+## Allows SQL statements to be run whenever a new database connection is created.
+## This is mainly useful for connection-scoped pragmas.
+## If empty, a database-specific default is used:
+## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
+## - MySQL: ""
+## - PostgreSQL: ""
+# DATABASE_CONN_INIT=""
+
 ## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
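For illustration, overriding the SQLite default above might look like the sketch below. The values are assumptions for a busy instance, not project recommendations; the string is plain SQL executed on every new connection, so any connection-scoped pragma can go here:

```
## Illustrative only (assumed values): wait up to 10 s on a locked SQLite
## database instead of the 5 s default, keeping the same durability level
# DATABASE_CONN_INIT="PRAGMA busy_timeout = 10000; PRAGMA synchronous = NORMAL;"
```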
@@ -36,9 +50,9 @@
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false

-## Client IP Header, used to identify the IP of the client, defaults to "X-Client-IP"
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
 ## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Client-IP
+# IP_HEADER=X-Real-IP

 ## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
 # ICON_CACHE_TTL=2592000
@@ -56,6 +70,44 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012

+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## Controls whether users can enable emergency access to their accounts.
+## This setting applies globally to all users.
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Job scheduler settings
+##
+## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
+## and are always in terms of UTC time (regardless of your local time zone settings).
+##
+## How often (in ms) the job scheduler thread checks for jobs that need running.
+## Set to 0 to globally disable scheduled jobs.
+# JOB_POLL_INTERVAL_MS=30000
+##
+## Cron schedule of the job that checks for Sends past their deletion date.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# SEND_PURGE_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that checks for trashed items to delete permanently.
+## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
+# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
+##
+## Cron schedule of the job that checks for incomplete 2FA logins.
+## Defaults to once every minute. Set blank to disable this job.
+# INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
+##
+## Cron schedule of the job that sends expiration reminders to emergency access grantors.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that grants emergency access requests that have met the required wait time.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 5 * * * *"
+
 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
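A reading aid for the schedules in the hunk above: the linked cron crate parses six whitespace-separated fields rather than the classic five, because the first field is seconds (an optional seventh field for the year is also accepted). Decoding the defaults:

```
# sec min hour day-of-month month day-of-week
#  0   5    *       *         *        *    -> every hour at minute 5 (SEND_PURGE and the emergency jobs)
#  0   5    0       *         *        *    -> every day at 00:05:00 UTC (TRASH_PURGE_SCHEDULE)
# 30   *    *       *         *        *    -> every minute at second 30 (INCOMPLETE_2FA_SCHEDULE)
```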
@@ -82,7 +134,7 @@
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents bitwarden_rs from automatically enabling it on start.
+## this setting only prevents vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
@@ -91,10 +143,32 @@
 ## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
 # DB_CONNECTION_RETRIES=15

+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
 ## Disable icon downloading
-## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
-## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
-## otherwise it will delete them and they won't be downloaded again.
+## Set to true to disable icon downloading in the internal icon service.
+## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
+## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
+## will be deleted eventually, but won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false

 ## Icon download timeout
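A hedged example of the icon-service options introduced above (the service choice and redirect code are assumptions, not the shipped defaults):

```
## Illustrative: redirect icon requests to a predefined external service...
# ICON_SERVICE=duckduckgo
## ...or to a custom one, where `{}` is replaced with the requested domain:
# ICON_SERVICE=https://icon.example.com/domain/{}
## Once settled on a service, a permanent redirect caches better in clients:
# ICON_REDIRECT_CODE=308
```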
@@ -125,7 +199,7 @@
 # EMAIL_EXPIRATION_TIME=600

 ## Email token size
-## Number of digits in an email token (min: 6, max: 19).
+## Number of digits in an email 2FA token (min: 6, max: 255).
 ## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
 # EMAIL_TOKEN_SIZE=6
@@ -170,22 +244,37 @@
 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
 ## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Bitwarden_RS
+# INVITATION_ORG_NAME=Vaultwarden

-## Per-organization attachment limit (KB)
-## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
 # ORG_ATTACHMENT_LIMIT=
-## Per-user attachment limit (KB).
-## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
 # USER_ATTACHMENT_LIMIT=

+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3
+
 ## Controls the PBBKDF password iterations to apply on the server
 ## The change only applies when the password is changed
 # PASSWORD_ITERATIONS=100000

-## Whether password hint should be sent into the error response when the client request it
-# SHOW_PASSWORD_HINT=true
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false

 ## Domain settings
 ## The domain must match the address from where you access the server
@@ -201,6 +290,17 @@
 ## Multiple values must be separated with a whitespace.
 # ALLOWED_IFRAME_ANCESTORS=

+## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
+# LOGIN_RATELIMIT_SECONDS=60
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
+## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
+# LOGIN_RATELIMIT_MAX_BURST=10
+
+## Number of seconds, on average, between admin requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3
+
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
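To make the average-plus-burst wording of the rate-limit settings above concrete: with the defaults, a single IP can fire up to 10 login/2FA requests back to back, after which roughly one further request is allowed per 60 seconds as the allowance refills; a password-plus-2FA login consumes two of those requests, hence the recommended minimum burst of 2. A stricter, purely illustrative override:

```
## Illustrative (assumed values): 5-request burst, refilled one request per 30 s on average
# LOGIN_RATELIMIT_SECONDS=30
# LOGIN_RATELIMIT_MAX_BURST=5
```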
@@ -232,21 +332,21 @@
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
 # AUTHENTICATOR_DISABLE_TIME_DRIFT=false

-## Rocket specific settings, check Rocket documentation to learn more
-# ROCKET_ENV=staging
-# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
-# ROCKET_PORT=8000
+## Rocket specific settings
+## See https://rocket.rs/v0.4/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
+# ROCKET_WORKERS=10
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}

 ## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
-# SMTP_FROM=bitwarden-rs@domain.tld
-# SMTP_FROM_NAME=Bitwarden_RS
+# SMTP_FROM=vaultwarden@domain.tld
+# SMTP_FROM_NAME=Vaultwarden
+# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
 # SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
-# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
-# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
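Pulling the renamed SMTP settings together, a minimal sketch of a working mail setup over STARTTLS on port 587; host, credentials, and domain are placeholders, and the lines would live uncommented in an instance's own .env file:

```
DOMAIN=https://vault.example.com   # keeps email links pointing at the right host
SMTP_HOST=smtp.example.com
SMTP_FROM=vaultwarden@example.com
SMTP_SECURITY=starttls
SMTP_PORT=587
SMTP_USERNAME=mailer
SMTP_PASSWORD=change-me
```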
2 .gitattributes vendored
@@ -1,3 +1,3 @@
 # Ignore vendored scripts in GitHub stats
-src/static/* linguist-vendored
+src/static/scripts/* linguist-vendored

14 .github/ISSUE_TEMPLATE/bug_report.md vendored
@@ -1,6 +1,6 @@
 ---
 name: Bug report
-about: Use this ONLY for bugs in bitwarden_rs itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
+about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
 title: ''
 labels: ''
 assignees: ''
@@ -8,11 +8,11 @@ assignees: ''
 ---
 <!--
 # ###
-NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
+NOTE: Please update to the latest version of vaultwarden before reporting an issue!
 This saves you and us a lot of time and troubleshooting.
 See:
-* https://github.com/dani-garcia/bitwarden_rs/issues/1180
-* https://github.com/dani-garcia/bitwarden_rs/wiki/Updating-the-bitwarden-image
+* https://github.com/dani-garcia/vaultwarden/issues/1180
+* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
 # ###
 -->

@@ -37,9 +37,9 @@ such as passwords, IP addresses, and DNS names as appropriate.
 -->

 <!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
-<!-- This is NOT the version number shown on the web vault, which is versioned separately from bitwarden_rs -->
+<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
 <!-- Remember to check if your issue exists on the latest version first! -->
-* bitwarden_rs version:
+* vaultwarden version:

 <!-- How the server was installed: Docker image, OS package, built from source, etc. -->
 * Install method:
@@ -54,7 +54,7 @@ such as passwords, IP addresses, and DNS names as appropriate.

 ### Steps to reproduce
 <!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start bitwarden_rs? -->
+and how did you start vaultwarden? -->

 ### Expected behaviour
 <!-- Tell us what you expected to happen -->
8 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -1,8 +1,8 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Discourse forum for bitwarden_rs
-    url: https://bitwardenrs.discourse.group/
+  - name: Discourse forum for vaultwarden
+    url: https://vaultwarden.discourse.group/
     about: Use this forum to request features or get help with usage/configuration.
-  - name: GitHub Discussions for bitwarden_rs
-    url: https://github.com/dani-garcia/bitwarden_rs/discussions
+  - name: GitHub Discussions for vaultwarden
+    url: https://github.com/dani-garcia/vaultwarden/discussions
     about: An alternative to the Discourse forum, if this is easier for you.
BIN .github/security-contact.gif vendored Normal file (2.3 KiB)
Binary file not shown.
145 .github/workflows/build.yml vendored
@@ -2,67 +2,51 @@ name: Build

 on:
   push:
-    # Ignore when there are only changes done too one of these paths
-    paths-ignore:
-      - "**.md"
-      - "**.txt"
-      - "azure-pipelines.yml"
-      - "docker/**"
-      - "hooks/**"
-      - "tools/**"
+    paths:
+      - ".github/workflows/build.yml"
+      - "src/**"
+      - "migrations/**"
+      - "Cargo.*"
+      - "build.rs"
+      - "diesel.toml"
+      - "rust-toolchain"
   pull_request:
-    # Ignore when there are only changes done too one of these paths
-    paths-ignore:
-      - "**.md"
-      - "**.txt"
-      - "azure-pipelines.yml"
-      - "docker/**"
-      - "hooks/**"
-      - "tools/**"
+    paths:
+      - ".github/workflows/build.yml"
+      - "src/**"
+      - "migrations/**"
+      - "Cargo.*"
+      - "build.rs"
+      - "diesel.toml"
+      - "rust-toolchain"

 jobs:
   build:
+    # Make warnings errors, this is to prevent warnings slipping through.
+    # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
+    env:
+      RUSTFLAGS: "-D warnings"
     strategy:
       fail-fast: false
       matrix:
         channel:
-          - nightly
-          # - stable
+          - stable
         target-triple:
           - x86_64-unknown-linux-gnu
-          # - x86_64-unknown-linux-musl
         include:
           - target-triple: x86_64-unknown-linux-gnu
             host-triple: x86_64-unknown-linux-gnu
-            features: "sqlite,mysql,postgresql"
-            channel: nightly
-            os: ubuntu-18.04
-            ext:
-          # - target-triple: x86_64-unknown-linux-gnu
-          #   host-triple: x86_64-unknown-linux-gnu
-          #   features: "sqlite,mysql,postgresql"
-          #   channel: stable
-          #   os: ubuntu-18.04
-          #   ext:
-          # - target-triple: x86_64-unknown-linux-musl
-          #   host-triple: x86_64-unknown-linux-gnu
-          #   features: "sqlite,postgresql"
-          #   channel: nightly
-          #   os: ubuntu-18.04
-          #   ext:
-          # - target-triple: x86_64-unknown-linux-musl
-          #   host-triple: x86_64-unknown-linux-gnu
-          #   features: "sqlite,postgresql"
-          #   channel: stable
-          #   os: ubuntu-18.04
-          #   ext:
+            features: [sqlite,mysql,postgresql,enable_mimalloc] # Remember to update the `cargo test` to match the amount of features
+            channel: stable
+            os: ubuntu-20.04
+            ext: ""

     name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
     runs-on: ${{ matrix.os }}
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
       # End Checkout the repo

@@ -81,64 +65,83 @@ jobs:

       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@v1
+      - uses: Swatinem/rust-cache@842ef286fff290e445b90b4002cc9807c3669641 # v1.3.0
       # End Enable Rust Caching

       # Uses the rust-toolchain file to determine version
       - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
-        uses: actions-rs/toolchain@v1
+        uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
         with:
           profile: minimal
           target: ${{ matrix.target-triple }}
-          components: clippy
+          components: clippy, rustfmt
       # End Uses the rust-toolchain file to determine version

       # Run cargo tests (In release mode to speed up future builds)
-      - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
-        uses: actions-rs/cargo@v1
+      # First test all features together, afterwards test them separately.
+      - name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         with:
           command: test
-          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
+      # Test single features
+      # 0: sqlite
+      - name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
+        with:
+          command: test
+          args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
+        if: ${{ matrix.features[0] != '' }}
+      # 1: mysql
+      - name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
+        with:
+          command: test
+          args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
+        if: ${{ matrix.features[1] != '' }}
+      # 2: postgresql
+      - name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
+        with:
+          command: test
+          args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
+        if: ${{ matrix.features[2] != '' }}
       # End Run cargo tests

-      # Run cargo clippy (In release mode to speed up future builds)
-      - name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
-        uses: actions-rs/cargo@v1
+      # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+      - name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         with:
           command: clippy
-          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
       # End Run cargo clippy

+      # Run cargo fmt
+      - name: '`cargo fmt`'
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
+        with:
+          command: fmt
+          args: --all -- --check
+      # End Run cargo fmt
+
       # Build the binary
-      - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
-        uses: actions-rs/cargo@v1
+      - name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
         with:
           command: build
-          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
       # End Build the binary

       # Upload artifact to Github Actions
       - name: Upload artifact
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0
         with:
-          name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
-          path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
+          name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+          path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
       # End Upload artifact to Github Actions

-      ## This is not used at the moment
-      ## We could start using this when we can build static binaries
-      # Upload to github actions release
-      # - name: Release
-      #   uses: Shopify/upload-to-release@1
-      #   if: startsWith(github.ref, 'refs/tags/')
-      #   with:
-      #     name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
-      #     path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
-      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
-      # End Upload to github actions release
15 .github/workflows/hadolint.yml vendored
@@ -1,8 +1,11 @@
 name: Hadolint

 on:
+  push:
+    paths:
+      - "docker/**"
   pull_request:
-    # Ignore when there are only changes done too one of these paths
     paths:
       - "docker/**"

@@ -13,22 +16,22 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
       # End Checkout the repo

-      # Download hadolint
+      # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
         run: |
-          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
+          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
           sudo chmod +x /usr/local/bin/hadolint
         env:
-          HADOLINT_VERSION: 1.19.0
+          HADOLINT_VERSION: 2.10.0
       # End Download hadolint

       # Test Dockerfiles
       - name: Run hadolint
         shell: bash
-        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
+        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
       # End Test Dockerfiles
119 .github/workflows/release.yml vendored Normal file
@@ -0,0 +1,119 @@
+name: Release
+
+on:
+  push:
+    paths:
+      - ".github/workflows/release.yml"
+      - "src/**"
+      - "migrations/**"
+      - "hooks/**"
+      - "docker/**"
+      - "Cargo.*"
+      - "build.rs"
+      - "diesel.toml"
+      - "rust-toolchain"
+
+    branches: # Only on paths above
+      - main
+
+    tags: # Always, regardless of paths above
+      - '*'
+
+jobs:
+  # https://github.com/marketplace/actions/skip-duplicate-actions
+  # Some checks to determine if we need to continue with building a new docker.
+  # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
+  skip_check:
+    runs-on: ubuntu-latest
+    if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
+    outputs:
+      should_skip: ${{ steps.skip_check.outputs.should_skip }}
+    steps:
+      - name: Skip Duplicates Actions
+        id: skip_check
+        uses: fkirc/skip-duplicate-actions@9d116fa7e55f295019cfab7e3ab72b478bcf7fdd # v4.0.0
+        with:
+          cancel_others: 'true'
+        # Only run this when not creating a tag
+        if: ${{ startsWith(github.ref, 'refs/heads/') }}
+
+  docker-build:
+    runs-on: ubuntu-latest
+    needs: skip_check
+    # Start a local docker registry to be used to generate multi-arch images.
+    services:
+      registry:
+        image: registry:2
+        ports:
+          - 5000:5000
+    env:
+      DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speedup building!
+      # DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
+      DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
+      SOURCE_COMMIT: ${{ github.sha }}
+      SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
+    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
+    strategy:
+      matrix:
+        base_image: ["debian","alpine"]
+
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
+        with:
+          fetch-depth: 0
+
+      # Login to Docker Hub
+      - name: Login to Docker Hub
+        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v2.0.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      # Determine Docker Tag
+      - name: Init Variables
+        id: vars
+        shell: bash
+        run: |
+          # Check which main tag we are going to build determined by github.ref
+          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
+            echo "set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
+            echo "::set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
+          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
+            echo "set-output name=DOCKER_TAG::testing"
+            echo "::set-output name=DOCKER_TAG::testing"
+          fi
+      # End Determine Docker Tag
+
+      - name: Build Debian based images
+        shell: bash
+        env:
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' }}
+
+      - name: Push Debian based images
+        shell: bash
+        env:
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' }}
+
+      - name: Build Alpine based images
+        shell: bash
+        env:
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' }}
+
+      - name: Push Alpine based images
+        shell: bash
+        env:
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' }}
38 .pre-commit-config.yaml Normal file
@@ -0,0 +1,38 @@
+---
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.2.0
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: check-toml
+      - id: end-of-file-fixer
+        exclude: "(.*js$|.*css$)"
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: detect-private-key
+  - repo: local
+    hooks:
+      - id: fmt
+        name: fmt
+        description: Format files with cargo fmt.
+        entry: cargo fmt
+        language: system
+        types: [rust]
+        args: ["--", "--check"]
+      - id: cargo-test
+        name: cargo test
+        description: Test the package for errors.
+        entry: cargo test
+        language: system
+        args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
+        types: [rust]
+        pass_filenames: false
+      - id: cargo-clippy
+        name: cargo clippy
+        description: Lint Rust sources
+        entry: cargo clippy
+        language: system
+        args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
+        types: [rust]
+        pass_filenames: false
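These hooks are driven by the standard pre-commit tool, so the usual workflow applies: install it (for example with `pip install pre-commit`), run `pre-commit install` once in the repository clone to register the git hook, and `pre-commit run --all-files` for a one-off pass. The local `fmt`, `cargo-test`, and `cargo-clippy` hooks then run the same feature set as the CI workflow above.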
2871 Cargo.lock generated
File diff suppressed because it is too large.
161 Cargo.toml
@@ -1,16 +1,19 @@
 [package]
-name = "bitwarden_rs"
+name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
-edition = "2018"
+edition = "2021"
+rust-version = "1.60"
+resolver = "2"

-repository = "https://github.com/dani-garcia/bitwarden_rs"
+repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
 license = "GPL-3.0-only"
 publish = false
 build = "build.rs"

 [features]
+# default = ["sqlite"]
 # Empty to keep compatibility, prefer to set USE_SYSLOG=true
 enable_syslog = []
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
@@ -18,120 +21,142 @@ postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
|
|||||||
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
|
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
|
||||||
# Enable to use a vendored and statically linked openssl
|
# Enable to use a vendored and statically linked openssl
|
||||||
vendored_openssl = ["openssl/vendored"]
|
vendored_openssl = ["openssl/vendored"]
|
||||||
|
# Enable MiMalloc memory allocator to replace the default malloc
|
||||||
|
# This can improve performance for Alpine builds
|
||||||
|
enable_mimalloc = ["mimalloc"]
|
||||||
|
|
||||||
# Enable unstable features, requires nightly
|
# Enable unstable features, requires nightly
|
||||||
# Currently only used to enable rusts official ip support
|
# Currently only used to enable rusts official ip support
|
||||||
unstable = []
|
unstable = []
|
||||||
|
|
||||||
[target."cfg(not(windows))".dependencies]
|
[target."cfg(not(windows))".dependencies]
|
||||||
syslog = "4.0.1"
|
# Logging
|
||||||
|
syslog = "6.0.1" # Needs to be v4 until fern is updated
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
|
# Logging
|
||||||
rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
|
log = "0.4.17"
|
||||||
rocket_contrib = "0.5.0-dev"
|
fern = { version = "0.6.1", features = ["syslog-6"] }
|
||||||
|
tracing = { version = "0.1.34", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
||||||
|
|
||||||
# HTTP client
|
backtrace = "0.3.65" # Logging panics to logfile instead stderr only
|
||||||
reqwest = { version = "0.11.2", features = ["blocking", "json"] }
|
|
||||||
|
|
||||||
# multipart/form-data support
|
# A `dotenv` implementation for Rust
|
||||||
multipart = { version = "0.17.1", features = ["server"], default-features = false }
|
dotenvy = { version = "0.15.1", default-features = false }
|
||||||
|
|
||||||
# WebSockets library
|
# Lazy initialization
|
||||||
ws = { version = "0.10.0", package = "parity-ws" }
|
once_cell = "1.10.0"
|
||||||
|
|
||||||
# MessagePack library
|
# Numerical libraries
|
||||||
rmpv = "0.4.7"
|
num-traits = "0.2.15"
|
||||||
|
num-derive = "0.3.3"
|
||||||
|
|
||||||
# Concurrent hashmap implementation
|
# Web framework
|
||||||
chashmap = "2.2.2"
|
rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }
|
||||||
|
|
||||||
|
# WebSockets libraries
|
||||||
|
ws = { version = "0.11.1", package = "parity-ws" }
|
||||||
|
rmpv = "1.0.0" # MessagePack library
|
||||||
|
chashmap = "2.2.2" # Concurrent hashmap implementation
|
||||||
|
|
||||||
|
# Async futures
|
||||||
|
futures = "0.3.21"
|
||||||
|
tokio = { version = "1.18.2", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time"] }
|
||||||
|
|
||||||
# A generic serialization/deserialization framework
|
# A generic serialization/deserialization framework
|
||||||
serde = { version = "1.0.125", features = ["derive"] }
|
serde = { version = "1.0.137", features = ["derive"] }
|
||||||
serde_json = "1.0.64"
|
serde_json = "1.0.81"
|
||||||
|
|
||||||
# Logging
|
|
||||||
log = "0.4.14"
|
|
||||||
fern = { version = "0.6.0", features = ["syslog-4"] }
|
|
||||||
|
|
||||||
# A safe, extensible ORM and Query builder
|
# A safe, extensible ORM and Query builder
|
||||||
diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
|
diesel = { version = "1.4.8", features = ["chrono", "r2d2"] }
|
||||||
diesel_migrations = "1.4.0"
|
diesel_migrations = "1.4.0"
|
||||||
|
|
||||||
# Bundled SQLite
|
# Bundled SQLite
|
||||||
libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }
|
libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }
|
||||||
|
|
||||||
# Crypto-related libraries
|
# Crypto-related libraries
|
||||||
rand = "0.8.3"
|
rand = "0.8.5"
|
||||||
ring = "0.16.20"
|
ring = "0.16.20"
|
||||||
|
|
||||||
# UUID generation
|
# UUID generation
|
||||||
uuid = { version = "0.8.2", features = ["v4"] }
|
uuid = { version = "1.0.0", features = ["v4"] }
|
||||||
|
|
||||||
# Date and time libraries
|
# Date and time libraries
|
||||||
chrono = { version = "0.4.19", features = ["serde"] }
|
chrono = { version = "0.4.19", features = ["clock", "serde"], default-features = false }
|
||||||
chrono-tz = "0.5.3"
|
chrono-tz = "0.6.1"
|
||||||
time = "0.2.26"
|
time = "0.3.9"
|
||||||
|
|
||||||
# TOTP library
|
# Job scheduler
|
||||||
oath = "0.10.2"
|
job_scheduler = "1.2.1"
|
||||||
|
|
||||||
# Data encoding library
|
# Data encoding library Hex/Base32/Base64
|
||||||
data-encoding = "2.3.2"
|
data-encoding = "2.3.2"
|
||||||
|
|
||||||
# JWT library
|
# JWT library
|
||||||
jsonwebtoken = "7.2.0"
|
jsonwebtoken = "8.1.0"
|
||||||
|
|
||||||
# U2F library
|
# TOTP library
|
||||||
u2f = "0.2.0"
|
totp-lite = "1.0.3"
|
||||||
|
|
||||||
 # Yubico Library
-yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
 
-# A `dotenv` implementation for Rust
+# WebAuthn libraries
-dotenv = { version = "0.15.0", default-features = false }
+webauthn-rs = "0.3.2"
 
-# Lazy initialization
+# Handling of URL's for WebAuthn
-once_cell = "1.7.2"
+url = "2.2.2"
 
-# Numerical libraries
-num-traits = "0.2.14"
-num-derive = "0.3.3"
-
 # Email libraries
-lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
+idna = "0.2.3" # Punycode conversion
-newline-converter = "0.2.0"
+lettre = { version = "0.10.0-rc.6", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
+percent-encoding = "2.1.0" # URL encoding library used for URL's in the emails
 
 # Template library
-handlebars = { version = "3.5.3", features = ["dir_source"] }
+handlebars = { version = "4.2.2", features = ["dir_source"] }
 
+# HTTP client
+reqwest = { version = "0.11.10", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
+
 # For favicon extraction from main website
-html5ever = "0.25.1"
+html5gum = "0.4.0"
-markup5ever_rcdom = "0.1.0"
+regex = { version = "1.5.5", features = ["std", "perf", "unicode-perl"], default-features = false }
-regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
+data-url = "0.1.1"
-data-url = "0.1.0"
+bytes = "1.1.0"
+cached = "0.34.0"
 
+# Used for custom short lived cookie jar during favicon extraction
+cookie = "0.16.0"
+cookie_store = "0.16.0"
+
 # Used by U2F, JWT and Postgres
-openssl = "0.10.33"
+openssl = "0.10.40"
 
-# URL encoding library
-percent-encoding = "2.1.0"
-# Punycode conversion
-idna = "0.2.2"
-
 # CLI argument parsing
-pico-args = "0.4.0"
+pico-args = "0.4.2"
 
-# Logging panics to logfile instead stderr only
-backtrace = "0.3.56"
-
 # Macro ident concatenation
-paste = "1.0.5"
+paste = "1.0.7"
+governor = "0.4.2"
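`governor` is a new dependency, presumably for request rate limiting. For context, a hedged sketch of its basic in-memory "direct" limiter (quota and loop are illustrative; vaultwarden would more likely use a keyed limiter per client):

```rust
use governor::{Quota, RateLimiter};
use std::num::NonZeroU32;

fn main() {
    // Allow at most 2 checks per second; further checks fail until quota refills.
    let limiter = RateLimiter::direct(Quota::per_second(NonZeroU32::new(2).unwrap()));
    for i in 0..5 {
        match limiter.check() {
            Ok(_) => println!("request {i}: allowed"),
            Err(_) => println!("request {i}: rate limited"),
        }
    }
}
```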
+
+# Capture CTRL+C
+ctrlc = { version = "3.2.2", features = ["termination"] }
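The `termination` feature makes `ctrlc` catch SIGTERM/SIGHUP as well as SIGINT, which matters in containers where `docker stop` sends SIGTERM. A sketch of the usual pattern (not vaultwarden's exact shutdown code):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn main() {
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    // With the "termination" feature this handler also fires on SIGTERM/SIGHUP.
    ctrlc::set_handler(move || r.store(false, Ordering::SeqCst))
        .expect("Error setting Ctrl-C handler");
    while running.load(Ordering::SeqCst) {
        std::thread::sleep(std::time::Duration::from_millis(100));
    }
    println!("shutting down cleanly");
}
```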
+
+# Allow overriding the default memory allocator
+# Mainly used for the musl builds, since the default musl malloc is very slow
+mimalloc = { version = "0.1.29", features = ["secure"], default-features = false, optional = true }
 
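The optional mimalloc dependency pairs with the `enable_mimalloc` build feature that shows up in the Dockerfiles below; opting in is a one-liner at the crate root. A minimal sketch (the feature gate shown in the comment is indicative of how an optional allocator would be wired up):

```rust
use mimalloc::MiMalloc;

// Route all heap allocations through mimalloc. In vaultwarden this would sit
// behind the optional feature, e.g. #[cfg(feature = "enable_mimalloc")].
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

fn main() {
    let buf = vec![0u8; 4096]; // allocated via mimalloc
    println!("allocated {} bytes", buf.len());
}
```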
 [patch.crates-io]
-# Use newest ring
+# The maintainer of the `job_scheduler` crate doesn't seem to have responded
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
+# to any issues or PRs for almost a year (as of April 2021). This hopefully
-rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
+# temporary fork updates Cargo.toml to use more up-to-date dependencies.
+# In particular, `cron` has since implemented parsing of some common syntax
+# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
+# 2022-05-04: Forked/Updated the job_scheduler again use the latest dependencies and some fixes.
+job_scheduler = { git = 'https://github.com/BlackDex/job_scheduler', rev = '9100fc596a083fd9c0b560f8f11f108e0a19d07e' }
 
-# For favicon extraction from main website
+# Strip debuginfo from the release builds
-data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
+# Also enable thin LTO for some optimizations
+[profile.release]
+strip = "debuginfo"
+lto = "thin"
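For context on the fork pinned above: `job_scheduler` drives recurring jobs from cron expressions, and its typical usage pattern looks roughly like this (the schedule string and tick interval are illustrative, not vaultwarden's actual job setup):

```rust
use job_scheduler::{Job, JobScheduler};
use std::time::Duration;

fn main() {
    let mut sched = JobScheduler::new();
    // Cron expression with a seconds field: fires at the top of every minute.
    sched.add(Job::new("0 * * * * *".parse().unwrap(), || {
        println!("recurring job fired");
    }));
    loop {
        sched.tick(); // runs any jobs that have come due
        std::thread::sleep(Duration::from_millis(500));
    }
}
```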

README.md (65 changes)
@@ -1,15 +1,16 @@
-### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
+### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
 
+📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
 
 ---
 
-[](https://travis-ci.org/dani-garcia/bitwarden_rs)
+[](https://hub.docker.com/r/vaultwarden/server)
-[](https://hub.docker.com/r/bitwardenrs/server)
+[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
-[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
+[](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
+[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
-[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
+[](https://matrix.to/#/#vaultwarden:matrix.org)
-[](https://matrix.to/#/#bitwarden_rs:matrix.org)
 
-Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
+Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
 
 **This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
 
@@ -33,29 +34,57 @@ Basically full implementation of Bitwarden API is provided including:
 Pull the docker image and mount a volume from the host for persistent storage:
 
 ```sh
-docker pull bitwardenrs/server:latest
+docker pull vaultwarden/server:latest
-docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
+docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
 ```
-This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.
+This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
 
 **IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
 
-This can be configured in [bitwarden_rs directly](https://github.com/dani-garcia/bitwarden_rs/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/bitwarden_rs/wiki/Proxy-examples)).
+This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
 
 If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
 
 ## Usage
-See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
+See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.
 
 ## Get in touch
-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://bitwardenrs.discourse.group/).
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).
 
-If you spot any bugs or crashes with bitwarden_rs itself, please [create an issue](https://github.com/dani-garcia/bitwarden_rs/issues/). Make sure there aren't any similar issues open, though!
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!
 
-If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
+If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
 
 ### Sponsors
 Thanks for your contribution to the project!
 
-- [@ChonoN](https://github.com/ChonoN)
+<table>
-- [@themightychris](https://github.com/themightychris)
+<tr>
+<td align="center">
+<a href="https://github.com/netdadaltd">
+<img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
+<br />
+<sub><b>netDada Ltd.</b></sub>
+</a>
+</td>
+</tr>
+</table>
+
+<br/>
+
+<table>
+<tr>
+<td align="center">
+<a href="https://github.com/Gyarbij" style="width: 75px">
+<sub><b>Chono N</b></sub>
+</a>
+</td>
+</tr>
+<tr>
+<td align="center">
+<a href="https://github.com/themightychris">
+<sub><b>Chris Alfano</b></sub>
+</a>
+</td>
+</tr>
+</table>

@@ -1,2 +0,0 @@
-[global.limits]
-json = 10485760 # 10 MiB

SECURITY.md (new file, 45 lines)
@@ -0,0 +1,45 @@
+Vaultwarden tries to prevent security issues but there could always slip something through.
+If you believe you've found a security issue in our application, we encourage you to
+notify us. We welcome working with you to resolve the issue promptly. Thanks in advance!
+
+# Disclosure Policy
+
+- Let us know as soon as possible upon discovery of a potential security issue, and we'll make every
+  effort to quickly resolve the issue.
+- Provide us a reasonable amount of time to resolve the issue before any disclosure to the public or a
+  third-party. We may publicly disclose the issue before resolving it, if appropriate.
+- Make a good faith effort to avoid privacy violations, destruction of data, and interruption or
+  degradation of our service. Only interact with accounts you own or with explicit permission of the
+  account holder.
+
+# In-scope
+
+- Security issues in any current release of Vaultwarden. Source code is available at https://github.com/dani-garcia/vaultwarden. This includes the current `latest` release and `main / testing` release.
+
+# Exclusions
+
+The following bug classes are out-of scope:
+
+- Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues)
+- Bugs that are not part of Vaultwarden, like on the the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
+- Issues in an upstream software dependency (ex: Rust, or External Libraries) which are already reported to the upstream maintainer
+- Attacks requiring physical access to a user's device
+- Issues related to software or protocols not under Vaultwarden's control
+- Vulnerabilities in outdated versions of Vaultwarden
+- Missing security best practices that do not directly lead to a vulnerability (You may still report them as a normal issue)
+- Issues that do not have any impact on the general public
+
+While researching, we'd like to ask you to refrain from:
+
+- Denial of service
+- Spamming
+- Social engineering (including phishing) of Vaultwarden developers, contributors or users
+
+Thank you for helping keep Vaultwarden and our users safe!
+
+# How to contact us
+
+- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
+- You can send an  to report a security issue.
+- If you want to send an encrypted email you can use the following GPG key:<br>
+https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index

@@ -1,22 +0,0 @@
-pool:
-  vmImage: 'Ubuntu-18.04'
-
-steps:
-- script: |
-    ls -la
-    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain) --profile=minimal
-    echo "##vso[task.prependpath]$HOME/.cargo/bin"
-  displayName: 'Install Rust'
-
-- script: |
-    sudo apt-get update
-    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
-  displayName: 'Install build libraries.'
-
-- script: |
-    rustc -Vv
-    cargo -V
-  displayName: Query rust and cargo versions
-
-- script : cargo test --features "sqlite,mysql,postgresql"
-  displayName: 'Test project with sqlite, mysql and postgresql backends'

build.rs (49 changes)
@@ -1,5 +1,5 @@
-use std::process::Command;
 use std::env;
+use std::process::Command;
 
 fn main() {
     // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
@@ -11,13 +11,18 @@ fn main() {
     println!("cargo:rustc-cfg=postgresql");
 
     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
-    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
+    compile_error!(
+        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
+    );
 
-    if let Ok(version) = env::var("BWRS_VERSION") {
+    // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
-        println!("cargo:rustc-env=BWRS_VERSION={}", version);
+    // If neither exist, read from git.
+    let maybe_vaultwarden_version =
+        env::var("VW_VERSION").or_else(|_| env::var("BWRS_VERSION")).or_else(|_| version_from_git_info());
+
+    if let Ok(version) = maybe_vaultwarden_version {
+        println!("cargo:rustc-env=VW_VERSION={}", version);
         println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
-    } else {
-        read_git_info().ok();
     }
 }
@@ -31,7 +36,13 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
 }
 
 /// This method reads info from Git, namely tags, branch, and revision
-fn read_git_info() -> Result<(), std::io::Error> {
+/// To access these values, use:
+///    - env!("GIT_EXACT_TAG")
+///    - env!("GIT_LAST_TAG")
+///    - env!("GIT_BRANCH")
+///    - env!("GIT_REV")
+///    - env!("VW_VERSION")
+fn version_from_git_info() -> Result<String, std::io::Error> {
     // The exact tag for the current commit, can be empty when
     // the current commit doesn't have an associated tag
     let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
@@ -54,23 +65,11 @@ fn read_git_info() -> Result<(), std::io::Error> {
     println!("cargo:rustc-env=GIT_REV={}", rev_short);
 
     // Combined version
-    let version = if let Some(exact) = exact_tag {
+    if let Some(exact) = exact_tag {
-        exact
+        Ok(exact)
-    } else if &branch != "master" {
+    } else if &branch != "main" && &branch != "master" {
-        format!("{}-{} ({})", last_tag, rev_short, branch)
+        Ok(format!("{}-{} ({})", last_tag, rev_short, branch))
     } else {
-        format!("{}-{}", last_tag, rev_short)
+        Ok(format!("{}-{}", last_tag, rev_short))
-    };
+    }
-
-    println!("cargo:rustc-env=BWRS_VERSION={}", version);
-    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
-
-    // To access these values, use:
-    //    env!("GIT_EXACT_TAG")
-    //    env!("GIT_LAST_TAG")
-    //    env!("GIT_BRANCH")
-    //    env!("GIT_REV")
-    //    env!("BWRS_VERSION")
-
-    Ok(())
 }
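Downstream, the crate can read these compile-time variables with `env!`/`option_env!`. A minimal sketch of consuming the emitted value (vaultwarden's actual code may differ):

```rust
// option_env! degrades gracefully when the value wasn't emitted,
// e.g. when building from a tarball with no git metadata and no $VW_VERSION.
fn version() -> &'static str {
    option_env!("VW_VERSION").unwrap_or("unknown")
}

fn main() {
    println!("vaultwarden {}", version());
}
```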

@@ -1,7 +1,8 @@
+# syntax=docker/dockerfile:1
 # The cross-built images have the build arch (`amd64`) embedded in the image
 # manifest, rather than the target arch. For example:
 #
-# $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
+# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
 # amd64
 #
 # Recent versions of Docker have started printing a warning when the image's

Dockerfile.j2
@@ -1,31 +1,41 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
-{% set build_stage_base_image = "rust:1.50" %}
+{% set build_stage_base_image = "rust:1.61-bullseye" %}
 {% if "alpine" in target_file %}
 {% if "amd64" in target_file %}
-{% set build_stage_base_image = "clux/muslrust:nightly-2021-02-22" %}
+{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.61.0" %}
-{% set runtime_stage_base_image = "alpine:3.13" %}
+{% set runtime_stage_base_image = "alpine:3.15" %}
 {% set package_arch_target = "x86_64-unknown-linux-musl" %}
 {% elif "armv7" in target_file %}
-{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
+{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.61.0" %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.15" %}
 {% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
+{% elif "armv6" in target_file %}
+{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.61.0" %}
+{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.15" %}
+{% set package_arch_target = "arm-unknown-linux-musleabi" %}
+{% elif "arm64" in target_file %}
+{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.61.0" %}
+{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.15" %}
+{% set package_arch_target = "aarch64-unknown-linux-musl" %}
 {% endif %}
 {% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "debian:buster-slim" %}
+{% set runtime_stage_base_image = "debian:bullseye-slim" %}
 {% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
+{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
 {% set package_arch_name = "arm64" %}
 {% set package_arch_target = "aarch64-unknown-linux-gnu" %}
 {% set package_cross_compiler = "aarch64-linux-gnu" %}
 {% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
+{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
 {% set package_arch_name = "armel" %}
 {% set package_arch_target = "arm-unknown-linux-gnueabi" %}
 {% set package_cross_compiler = "arm-linux-gnueabi" %}
 {% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
 {% set package_arch_name = "armhf" %}
 {% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
 {% set package_cross_compiler = "arm-linux-gnueabihf" %}
@@ -40,97 +50,101 @@
 {% else %}
 {% set package_arch_target_param = "" %}
 {% endif %}
+{% if "buildx" in target_file %}
+{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
+{% else %}
+{% set mount_rust_cache = "" %}
+{% endif %}
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "2.19.0" %}
+{% set vault_version = "2.28.1" %}
-{% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %}
+{% set vault_image_digest = "sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v{{ vault_version }}
+#     $ docker pull vaultwarden/web-vault:v{{ vault_version }}
-#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" bitwardenrs/web-vault:v{{ vault_version }}
+#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
-#     [bitwardenrs/web-vault@{{ vault_image_digest }}]
+#     [vaultwarden/web-vault@{{ vault_image_digest }}]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" bitwardenrs/web-vault@{{ vault_image_digest }}
+#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
-#     [bitwardenrs/web-vault:v{{ vault_version }}]
+#     [vaultwarden/web-vault:v{{ vault_version }}]
 #
-FROM bitwardenrs/web-vault@{{ vault_image_digest }} as vault
+FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
 
 ########################## BUILD IMAGE ##########################
 FROM {{ build_stage_base_image }} as build
 
-{% if "alpine" in target_file %}
-{% if "amd64" in target_file %}
-# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
-ARG DB=sqlite,postgresql
-{% set features = "sqlite,postgresql" %}
-{% else %}
-# Alpine-based ARM (musl) only supports sqlite during compile time.
-ARG DB=sqlite
-{% set features = "sqlite" %}
-{% endif %}
-{% else %}
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
-{% set features = "sqlite,mysql,postgresql" %}
-{% endif %}
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
-# Don't download rust docs
+{# {% if "alpine" not in target_file and "buildx" in target_file %}
-RUN rustup set profile minimal
+# Debian based Buildx builds can use some special apt caching to speedup building.
+# By default Debian based images have some rules to keep docker builds clean, we need to remove this.
+# See: https://hub.docker.com/r/docker/dockerfile
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+{% endif %} #}
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
 {% if "alpine" in target_file %}
-ENV USER "root"
+{% if "armv6" in target_file %}
-ENV RUSTFLAGS='-C link-arg=-s'
+# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
-{% if "armv7" in target_file %}
+ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
-ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
 {% endif %}
 {% elif "arm" in target_file %}
+#
 # Install required build libs for {{ package_arch_name }} architecture.
-# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
+# hadolint ignore=DL3059
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
+RUN dpkg --add-architecture {{ package_arch_name }} \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture {{ package_arch_name }} \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev{{ package_arch_prefix }} \
         libc6-dev{{ package_arch_prefix }} \
         libpq5{{ package_arch_prefix }} \
-        libpq-dev \
+        libpq-dev{{ package_arch_prefix }} \
+        libmariadb3{{ package_arch_prefix }} \
        libmariadb-dev{{ package_arch_prefix }} \
-        libmariadb-dev-compat{{ package_arch_prefix }}
+        libmariadb-dev-compat{{ package_arch_prefix }} \
+        gcc-{{ package_cross_compiler }} \
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"
+
+# Set arm specific environment values
+ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
+    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
+
+{% elif "amd64" in target_file %}
+# Install DB packages
 RUN apt-get update \
     && apt-get install -y \
-        --no-install-recommends \
-        gcc-{{ package_cross_compiler }} \
-    && mkdir -p ~/.cargo \
-    && echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
-    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
-    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-{% endif -%}
-
-{% if "amd64" in target_file and "alpine" not in target_file %}
-# Install DB packages
-RUN apt-get update && apt-get install -y \
         --no-install-recommends \
         libmariadb-dev{{ package_arch_prefix }} \
         libpq-dev{{ package_arch_prefix }} \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 {% endif %}
 
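For the armv7 case, the three `echo` lines above render to a `${CARGO_HOME}/config` along these lines, pointing cargo at the cross toolchain (values substituted from the template variables set earlier):

```toml
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]
```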
@@ -143,39 +157,23 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-{% if "alpine" not in target_file %}
-{% if "arm" in target_file %}
-# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
-# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
-    rm -rvf ./libmariadb-dev-compat*.deb
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
-
-ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
-ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
-{% endif -%}
-{% endif %}
 {% if package_arch_target is defined %}
-RUN rustup target add {{ package_arch_target }}
+RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
+{% endif %}
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+{% if "alpine" in target_file %}
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+{% else %}
+ARG DB=sqlite,mysql,postgresql
 {% endif %}
 
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
+RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
-RUN find . -not -path "./target*" -delete
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -186,76 +184,66 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
+# hadolint ignore=DL3059
-{% if "alpine" in target_file %}
+RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
-{% if "armv7" in target_file %}
-RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
-{% endif %}
-{% endif %}
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
 FROM {{ runtime_stage_base_image }}
 
-ENV ROCKET_ENV "staging"
+ENV ROCKET_PROFILE="release" \
-ENV ROCKET_PORT=80
+    ROCKET_ADDRESS=0.0.0.0 \
-ENV ROCKET_WORKERS=10
+    ROCKET_PORT=80
-{% if "alpine" in runtime_stage_base_image %}
+{%- if "alpine" in runtime_stage_base_image %} \
-ENV SSL_CERT_DIR=/etc/ssl/certs
+    SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}
 
 
 {% if "amd64" not in target_file %}
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 {% endif %}
-# Install needed libraries
+# Create data folder and Install needed libraries
+RUN mkdir /data \
 {% if "alpine" in runtime_stage_base_image %}
-RUN apk add --no-cache \
+    && apk add --no-cache \
        openssl \
+       tzdata \
        curl \
        dumb-init \
-{% if "sqlite" in features %}
-       sqlite \
-{% endif %}
-{% if "mysql" in features %}
-       mariadb-connector-c \
-{% endif %}
-{% if "postgresql" in features %}
-       postgresql-libs \
-{% endif %}
        ca-certificates
 {% else %}
-RUN apt-get update && apt-get install -y \
+    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
-       sqlite3 \
        libmariadb-dev-compat \
        libpq5 \
+    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
 {% endif %}
 
-RUN mkdir /data
 {% if "amd64" not in target_file %}
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 {% endif %}
 
 VOLUME /data
 EXPOSE 80
 EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
 {% if package_arch_target is defined %}
-COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
+COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
 {% else %}
-COPY --from=build /app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/vaultwarden .
 {% endif %}
 
 COPY docker/healthcheck.sh /healthcheck.sh
@@ -264,6 +252,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
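The `mount_rust_cache` variable above only expands to `--mount=type=cache,...` in the rendered `*.buildx` files, and those RUN cache mounts require BuildKit. A plausible invocation under that assumption (the image tag is arbitrary):

```sh
# From the repository root; requires Docker with BuildKit support.
DOCKER_BUILDKIT=1 docker build -t vaultwarden:local -f docker/amd64/Dockerfile.buildx .
```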

@@ -7,3 +7,9 @@ all: $(OBJECTS)
 
 %/Dockerfile.alpine: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
+
+%/Dockerfile.buildx: Dockerfile.j2 render_template
+	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
+
+%/Dockerfile.buildx.alpine: Dockerfile.j2 render_template
+	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
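Assuming this Makefile sits next to `Dockerfile.j2` (the `docker/` directory), regenerating a single rendered file would look like:

```sh
cd docker
make amd64/Dockerfile.buildx  # runs ./render_template with target_file=amd64/Dockerfile.buildx
```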

docker/amd64/Dockerfile
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -11,36 +13,44 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
+#     $ docker pull vaultwarden/web-vault:v2.28.1
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
-#     [bitwardenrs/web-vault:v2.19.0]
+#     [vaultwarden/web-vault:v2.28.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.50 as build
+FROM rust:1.61-bullseye as build
 
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
 # Install DB packages
-RUN apt-get update && apt-get install -y \
+RUN apt-get update \
+    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
 # Creates a dummy project used to grab dependencies
@@ -53,11 +63,14 @@ COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release \
-RUN find . -not -path "./target*" -delete
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -68,39 +81,42 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM debian:buster-slim
+FROM debian:bullseye-slim
 
-ENV ROCKET_ENV "staging"
+ENV ROCKET_PROFILE="release" \
-ENV ROCKET_PORT=80
+    ROCKET_ADDRESS=0.0.0.0 \
-ENV ROCKET_WORKERS=10
+    ROCKET_PORT=80
 
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
-       sqlite3 \
        libmariadb-dev-compat \
        libpq5 \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir /data
 VOLUME /data
 EXPOSE 80
 EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -108,6 +124,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

docker/amd64/Dockerfile.alpine
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -11,33 +13,37 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
+#     $ docker pull vaultwarden/web-vault:v2.28.1
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
-#     [bitwardenrs/web-vault:v2.19.0]
+#     [vaultwarden/web-vault:v2.28.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
 
 ########################## BUILD IMAGE ##########################
-FROM clux/muslrust:nightly-2021-02-22 as build
+FROM blackdex/rust-musl:x86_64-musl-stable-1.61.0 as build
 
-# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
-ARG DB=sqlite,postgresql
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
-
-ENV USER "root"
+# Create CARGO_HOME folder and don't download rust docs
-ENV RUSTFLAGS='-C link-arg=-s'
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -50,11 +56,15 @@ COPY ./build.rs ./build.rs
 
 RUN rustup target add x86_64-unknown-linux-musl
 
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
-RUN find . -not -path "./target*" -delete
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -65,37 +75,40 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.13
+FROM alpine:3.15
 
-ENV ROCKET_ENV "staging"
+ENV ROCKET_PROFILE="release" \
-ENV ROCKET_PORT=80
+    ROCKET_ADDRESS=0.0.0.0 \
-ENV ROCKET_WORKERS=10
+    ROCKET_PORT=80 \
-ENV SSL_CERT_DIR=/etc/ssl/certs
+    SSL_CERT_DIR=/etc/ssl/certs
 
-# Install needed libraries
-RUN apk add --no-cache \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
        openssl \
+       tzdata \
        curl \
        dumb-init \
-       sqlite \
-       postgresql-libs \
        ca-certificates
 
-RUN mkdir /data
 VOLUME /data
 EXPOSE 80
 EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
+COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -103,6 +116,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

docker/amd64/Dockerfile.buildx (new file, 132 lines)
@@ -0,0 +1,132 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
+#     https://docs.docker.com/develop/develop-images/multistage-build/
+#     https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
+#
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
+
+########################## BUILD IMAGE ##########################
+FROM rust:1.61-bullseye as build
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+# Install DB packages
+RUN apt-get update \
+    && apt-get install -y \
+        --no-install-recommends \
+        libmariadb-dev \
+        libpq-dev \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
+    && find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+# hadolint ignore=DL3059
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM debian:bullseye-slim
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80
+
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
+        --no-install-recommends \
+        openssl \
+        ca-certificates \
+        curl \
+        dumb-init \
+        libmariadb-dev-compat \
+        libpq5 \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/release/vaultwarden .
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
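The `.buildx` variants above differ from the plain Dockerfiles mainly in the `# syntax=docker/dockerfile:1` header and the `RUN --mount=type=cache` flags, which keep the cargo git and registry caches out of the image layers while persisting them between builds. A sketch of invoking such a build from the repository root (the tag is an illustrative choice; BuildKit must be enabled for cache mounts to work):

    DOCKER_BUILDKIT=1 docker build \
        -t vaultwarden:amd64-buildx \
        -f docker/amd64/Dockerfile.buildx .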
docker/amd64/Dockerfile.buildx.alpine (new file, 124 lines)
@@ -0,0 +1,124 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
+#     https://docs.docker.com/develop/develop-images/multistage-build/
+#     https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
+#
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
+
+########################## BUILD IMAGE ##########################
+FROM blackdex/rust-musl:x86_64-musl-stable-1.61.0 as build
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
+    && find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+# hadolint ignore=DL3059
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM alpine:3.15
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs
+
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
+        openssl \
+        tzdata \
+        curl \
+        dumb-init \
+        ca-certificates
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
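The digest-pinning comments that each of these files carries double as a verification recipe; spelled out as shell commands, straight from the comments themselves:

    docker pull vaultwarden/web-vault:v2.28.1
    docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
    # [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]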
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -11,57 +13,64 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-#     [bitwardenrs/web-vault:v2.19.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.50 as build
+FROM rust:1.61-bullseye as build
 
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
+#
 # Install required build libs for arm64 architecture.
-# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture arm64 \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture arm64 \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:arm64 \
         libc6-dev:arm64 \
         libpq5:arm64 \
-        libpq-dev \
+        libpq-dev:arm64 \
+        libmariadb3:arm64 \
         libmariadb-dev:arm64 \
-        libmariadb-dev-compat:arm64
-
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
+        libmariadb-dev-compat:arm64 \
         gcc-aarch64-linux-gnu \
-    && mkdir -p ~/.cargo \
-    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
-    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
+
+# Set arm specific environment values
+ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
+    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
 
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 
@@ -72,33 +81,16 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
-# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
-    rm -rvf ./libmariadb-dev-compat*.deb
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
-
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
 RUN rustup target add aarch64-unknown-linux-gnu
 
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
 
@@ -109,33 +101,35 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/aarch64-debian:buster
+FROM balenalib/aarch64-debian:bullseye
 
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80
 
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
     --no-install-recommends \
     openssl \
    ca-certificates \
     curl \
     dumb-init \
-    sqlite3 \
    libmariadb-dev-compat \
     libpq5 \
+    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
 
@@ -144,9 +138,9 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
+COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
 
@@ -154,6 +148,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
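For reference, the three `echo` lines in the build stage above leave a `${CARGO_HOME}/config` whose content is equivalent to:

    [target.aarch64-unknown-linux-gnu]
    linker = "aarch64-linux-gnu-gcc"
    rustflags = ["-L/usr/lib/aarch64-linux-gnu"]

which is what lets a plain `cargo build --target=aarch64-unknown-linux-gnu` pick up the cross linker and library search path.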
docker/arm64/Dockerfile.alpine (new file, 128 lines)
@@ -0,0 +1,128 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
+#     https://docs.docker.com/develop/develop-images/multistage-build/
+#     https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
+#
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
+
+########################## BUILD IMAGE ##########################
+FROM blackdex/rust-musl:aarch64-musl-stable-1.61.0 as build
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+RUN rustup target add aarch64-unknown-linux-musl
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
+    && find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+# hadolint ignore=DL3059
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM balenalib/aarch64-alpine:3.15
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs
+
+# hadolint ignore=DL3059
+RUN [ "cross-build-start" ]
+
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
+        openssl \
+        tzdata \
+        curl \
+        dumb-init \
+        ca-certificates
+
+# hadolint ignore=DL3059
+RUN [ "cross-build-end" ]
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
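Outside Docker, the musl build this stage performs corresponds roughly to the following sketch; it assumes a musl cross toolchain like the one baked into the blackdex/rust-musl image:

    rustup target add aarch64-unknown-linux-musl
    cargo build --features sqlite,mysql,postgresql,enable_mimalloc \
        --release --target=aarch64-unknown-linux-musl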
docker/arm64/Dockerfile.buildx (new file, 156 lines)
@@ -0,0 +1,156 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
+#     https://docs.docker.com/develop/develop-images/multistage-build/
+#     https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
+#
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
+
+########################## BUILD IMAGE ##########################
+FROM rust:1.61-bullseye as build
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+#
+# Install required build libs for arm64 architecture.
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture arm64 \
+    && apt-get update \
+    && apt-get install -y \
+        --no-install-recommends \
+        libssl-dev:arm64 \
+        libc6-dev:arm64 \
+        libpq5:arm64 \
+        libpq-dev:arm64 \
+        libmariadb3:arm64 \
+        libmariadb-dev:arm64 \
+        libmariadb-dev-compat:arm64 \
+        gcc-aarch64-linux-gnu \
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
+
+# Set arm specific environment values
+ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
+    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
+    && find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+# hadolint ignore=DL3059
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM balenalib/aarch64-debian:bullseye
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80
+
+# hadolint ignore=DL3059
+RUN [ "cross-build-start" ]
+
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
+        --no-install-recommends \
+        openssl \
+        ca-certificates \
+        curl \
+        dumb-init \
+        libmariadb-dev-compat \
+        libpq5 \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# hadolint ignore=DL3059
+RUN [ "cross-build-end" ]
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
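A hedged sketch of building this arm64 variant with buildx; the platform flag and tag are assumptions rather than something this diff prescribes (the repo's Makefile may wrap the call differently):

    docker buildx build \
        --platform linux/arm64 \
        -t vaultwarden:arm64-buildx \
        -f docker/arm64/Dockerfile.buildx \
        --load .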
docker/arm64/Dockerfile.buildx.alpine (new file, 128 lines)
@@ -0,0 +1,128 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
+# Using multistage build:
+#     https://docs.docker.com/develop/develop-images/multistage-build/
+#     https://whitfin.io/speeding-up-rust-docker-builds/
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
+#
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
+
+########################## BUILD IMAGE ##########################
+FROM blackdex/rust-musl:aarch64-musl-stable-1.61.0 as build
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./rust-toolchain
+COPY ./build.rs ./build.rs
+
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
+    && find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Make sure that we actually build the project
+RUN touch src/main.rs
+
+# Builds again, this time it'll just be
+# your actual source files being built
+# hadolint ignore=DL3059
+RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+FROM balenalib/aarch64-alpine:3.15
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs
+
+# hadolint ignore=DL3059
+RUN [ "cross-build-start" ]
+
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
+        openssl \
+        tzdata \
+        curl \
+        dumb-init \
+        ca-certificates
+
+# hadolint ignore=DL3059
+RUN [ "cross-build-end" ]
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+# Configures the startup!
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
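All of these Dockerfiles share one dependency-caching trick: compile a dummy crate against the real manifests so the expensive dependency layer is cached, then rebuild with the actual sources. Distilled to its core (paths exactly as in the files above):

    RUN USER=root cargo new --bin /app    # dummy src/main.rs
    WORKDIR /app
    COPY ./Cargo.* ./                     # manifests only: the cache key
    RUN cargo build --release \
        && find . -not -path "./target*" -delete
    COPY . .                              # now the real sources
    RUN touch src/main.rs                 # force the bin crate to rebuild
    RUN cargo build --release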
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
@@ -11,57 +13,64 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     $ docker pull vaultwarden/web-vault:v2.28.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-#     [bitwardenrs/web-vault:v2.19.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
+#     [vaultwarden/web-vault:v2.28.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.50 as build
+FROM rust:1.61-bullseye as build
 
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
 
-# Don't download rust docs
-RUN rustup set profile minimal
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
 
+#
 # Install required build libs for armel architecture.
-# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-    /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture armel \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture armel \
     && apt-get update \
     && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
-        libpq-dev \
+        libpq-dev:armel \
+        libmariadb3:armel \
        libmariadb-dev:armel \
-        libmariadb-dev-compat:armel
-
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
+        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
-    && mkdir -p ~/.cargo \
-    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
-    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
+
+# Set arm specific environment values
+ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
+    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
 
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 
@@ -72,33 +81,16 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
-# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
-    rm -rvf ./libmariadb-dev-compat*.deb
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
-
-ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
 RUN rustup target add arm-unknown-linux-gnueabi
 
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
 
@@ -109,33 +101,35 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/rpi-debian:buster
+FROM balenalib/rpi-debian:bullseye
 
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80
 
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
-    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
+    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
 
@@ -144,9 +138,9 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
+COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
 
@@ -154,6 +148,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with there entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Lets keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
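The RUN [ "cross-build-start" ] / [ "cross-build-end" ] pair is a balenalib base-image convention that toggles QEMU emulation, so the armel runtime stage can execute on an x86_64 build host. On a plain Docker host, registering the QEMU binfmt handlers first is the usual prerequisite; the helper image below is a common way to do that, not something this repo mandates:

    docker run --rm --privileged multiarch/qemu-user-static --reset -p yes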
docker/armv6/Dockerfile.alpine (new file, 130 lines)
@@ -0,0 +1,130 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.28.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
#     [vaultwarden/web-vault:v2.28.1]
#
FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.61.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
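As a local sanity check, this file can be built directly from the repository root, much as `hooks/build` does; the `-t` tag below is only an illustrative name, not one the hooks produce:

$ docker build -t vaultwarden:armv6-alpine -f docker/armv6/Dockerfile.alpine .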
156  docker/armv6/Dockerfile.buildx  Normal file
@@ -0,0 +1,156 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.28.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
#     [vaultwarden/web-vault:v2.28.1]
#
FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev:armel \
        libmariadb3:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
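The `--mount=type=cache` instructions in this variant only work under BuildKit, so a manual build needs the same environment variable that `hooks/build` checks for; the tag is again an illustrative placeholder:

$ DOCKER_BUILDKIT=1 docker build -t vaultwarden:armv6-buildx -f docker/armv6/Dockerfile.buildx .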
130  docker/armv6/Dockerfile.buildx.alpine  Normal file
@@ -0,0 +1,130 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.28.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
#     [vaultwarden/web-vault:v2.28.1]
#
FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.61.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

@@ -11,57 +13,64 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
+#     $ docker pull vaultwarden/web-vault:v2.28.1
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
-#     [bitwardenrs/web-vault:v2.19.0]
+#     [vaultwarden/web-vault:v2.28.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.50 as build
+FROM rust:1.61-bullseye as build

-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"

-# Don't download rust docs
-RUN rustup set profile minimal
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal

+#
 # Install required build libs for armhf architecture.
-# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
+# hadolint ignore=DL3059
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-        /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture armhf \
+RUN dpkg --add-architecture armhf \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:armhf \
         libc6-dev:armhf \
         libpq5:armhf \
-        libpq-dev \
+        libpq-dev:armhf \
+        libmariadb3:armhf \
         libmariadb-dev:armhf \
-        libmariadb-dev-compat:armhf
+        libmariadb-dev-compat:armhf \

-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
         gcc-arm-linux-gnueabihf \
-    && mkdir -p ~/.cargo \
-    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
-    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config
+    #
+    # Make sure cargo has the right target config
+    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
+    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
+    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

+# Set arm specific environment values
+ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
+    CROSS_COMPILE="1" \
+    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
+    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app

@@ -72,33 +81,16 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs

-# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
-# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
-    rm -rvf ./libmariadb-dev-compat*.deb
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
-
-ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 RUN rustup target add armv7-unknown-linux-gnueabihf

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
+    && find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore

@@ -109,33 +101,35 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-debian:buster
+FROM balenalib/armv7hf-debian:bullseye

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80

+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
         --no-install-recommends \
         openssl \
         ca-certificates \
         curl \
         dumb-init \
-        sqlite3 \
         libmariadb-dev-compat \
         libpq5 \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data

@@ -144,9 +138,9 @@ EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
+COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh

@@ -154,6 +148,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with their entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:1
+
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

@@ -11,34 +13,37 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
+#     $ docker pull vaultwarden/web-vault:v2.28.1
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
-#     [bitwardenrs/web-vault:v2.19.0]
+#     [vaultwarden/web-vault:v2.28.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

 ########################## BUILD IMAGE ##########################
-FROM messense/rust-musl-cross:armv7-musleabihf as build
+FROM blackdex/rust-musl:armv7-musleabihf-stable-1.61.0 as build

-# Alpine-based ARM (musl) only supports sqlite during compile time.
-ARG DB=sqlite

 # Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"

-# Don't download rust docs
-RUN rustup set profile minimal

-ENV USER "root"
-ENV RUSTFLAGS='-C link-arg=-s'
-ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app

@@ -51,11 +56,15 @@ COPY ./build.rs ./build.rs

 RUN rustup target add armv7-unknown-linux-musleabihf

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
+    && find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore

@@ -66,31 +75,33 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
-RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-alpine:3.13
+FROM balenalib/armv7hf-alpine:3.15

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs

+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

-# Install needed libraries
-RUN apk add --no-cache \
+# Create data folder and install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
         openssl \
+        tzdata \
         curl \
         dumb-init \
-        sqlite \
         ca-certificates

-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data

@@ -99,9 +110,9 @@ EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
+WORKDIR /
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
+COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh

@@ -109,6 +120,9 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 # Configures the startup!
-WORKDIR /
+# We should be able to remove the dumb-init now with Rocket 0.5
+# But the balenalib images have some issues with their entry.sh
+# See: https://github.com/balena-io-library/base-images/issues/735
+# Let's keep using dumb-init for now, since that is working fine.
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
156  docker/armv7/Dockerfile.buildx  Normal file
@@ -0,0 +1,156 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.28.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
#     [vaultwarden/web-vault:v2.28.1]
#
FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.61-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for armhf architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev:armhf \
        libmariadb3:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
128  docker/armv7/Dockerfile.buildx.alpine  Normal file
@@ -0,0 +1,128 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.28.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.28.1
#     [vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5
#     [vaultwarden/web-vault:v2.28.1]
#
FROM vaultwarden/web-vault@sha256:df7f12b1e22bf0dfc1b6b6f46921b4e9e649561931ba65357c1eb1963514b3b5 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.61.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.15

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
# We should be able to remove the dumb-init now with Rocket 0.5
# But the balenalib images have some issues with their entry.sh
# See: https://github.com/balena-io-library/base-images/issues/735
# Let's keep using dumb-init for now, since that is working fine.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,10 +1,20 @@
 #!/bin/sh

-if [ -r /etc/bitwarden_rs.sh ]; then
+if [ -r /etc/vaultwarden.sh ]; then
+    . /etc/vaultwarden.sh
+elif [ -r /etc/bitwarden_rs.sh ]; then
+    echo "### You are using the old /etc/bitwarden_rs.sh script, please migrate to /etc/vaultwarden.sh ###"
     . /etc/bitwarden_rs.sh
 fi

-if [ -d /etc/bitwarden_rs.d ]; then
+if [ -d /etc/vaultwarden.d ]; then
+    for f in /etc/vaultwarden.d/*.sh; do
+        if [ -r $f ]; then
+            . $f
+        fi
+    done
+elif [ -d /etc/bitwarden_rs.d ]; then
+    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
     for f in /etc/bitwarden_rs.d/*.sh; do
         if [ -r $f ]; then
             . $f
@@ -12,4 +22,4 @@ if [ -d /etc/bitwarden_rs.d ]; then
     done
 fi

-exec /bitwarden_rs "${@}"
+exec /vaultwarden "${@}"
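For illustration, a drop-in file picked up by the new /etc/vaultwarden.d loop could look like the sketch below; the file name is a hypothetical example, and the exported variables are just sample vaultwarden settings:

$ cat /etc/vaultwarden.d/10-env.sh
# Sourced by /start.sh before vaultwarden starts (hypothetical example)
export DATA_FOLDER=/data
export WEBSOCKET_ENABLED=true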
@@ -10,7 +10,7 @@ Docker Hub hooks provide these predefined [environment variables](https://docs.d
 * `DOCKER_TAG`: the Docker repository tag being built.
 * `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

-The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
+The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

 ## References
@@ -7,10 +7,5 @@ arches=(
 )

 if [[ "${DOCKER_TAG}" == *alpine ]]; then
-    # The Alpine image build currently only works for certain arches.
     distro_suffix=.alpine
-    arches=(
-        amd64
-        armv7
-    )
 fi
@@ -22,7 +22,7 @@ fi
 LABELS=(
     # https://github.com/opencontainers/image-spec/blob/master/annotations.md
     org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
-    org.opencontainers.image.documentation="https://github.com/dani-garcia/bitwarden_rs/wiki"
+    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
     org.opencontainers.image.licenses="GPL-3.0-only"
     org.opencontainers.image.revision="${SOURCE_COMMIT}"
     org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
@@ -34,12 +34,17 @@ for label in "${LABELS[@]}"; do
     LABEL_ARGS+=(--label "${label}")
 done

+# Check if DOCKER_BUILDKIT is set; if so, use Dockerfile.buildx as the template
+if [[ -n "${DOCKER_BUILDKIT}" ]]; then
+    buildx_suffix=.buildx
+fi
+
 set -ex

 for arch in "${arches[@]}"; do
     docker build \
         "${LABEL_ARGS[@]}" \
         -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-        -f docker/${arch}/Dockerfile${distro_suffix} \
+        -f docker/${arch}/Dockerfile${buildx_suffix}${distro_suffix} \
         .
 done
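Putting the hook together, a local run might look like the line below, with made-up placeholder values standing in for the variables Docker Hub normally provides:

$ DOCKER_BUILDKIT=1 DOCKER_REPO=example/vaultwarden DOCKER_TAG=testing hooks/build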
17  hooks/push
@@ -10,7 +10,7 @@ join() { local IFS="$1"; shift; echo "$*"; }
 set -ex

-echo ">>> Starting local Docker registry..."
+echo ">>> Starting local Docker registry when needed..."

 # Docker Buildx's `docker-container` driver is needed for multi-platform
 # builds, but it can't access existing images on the Docker host (like the
@@ -25,7 +25,13 @@ echo ">>> Starting local Docker registry..."
 # Use host networking so the buildx container can access the registry via
 # localhost.
 #
-docker run -d --name registry --network host registry:2 # defaults to port 5000
+# First check if there already is a registry container running, else skip it.
+# This will only happen either locally or when running it via GitHub Actions
+#
+if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
+    # defaults to port 5000
+    docker run -d --name registry --network host registry:2
+fi

 # Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
 # Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
@@ -49,7 +55,12 @@ echo ">>> Setting up Docker Buildx..."
|
|||||||
#
|
#
|
||||||
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
|
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
|
||||||
#
|
#
|
||||||
|
# Check if there already is a builder running, else skip this and use the existing.
|
||||||
|
# This will only happen either locally or running it via Github Actions
|
||||||
|
#
|
||||||
|
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
|
||||||
docker buildx create --name builder --use --driver-opt network=host
|
docker buildx create --name builder --use --driver-opt network=host
|
||||||
|
fi
|
||||||
|
|
||||||
echo ">>> Running Docker Buildx..."
|
echo ">>> Running Docker Buildx..."
|
||||||
|
|
||||||
@@ -103,7 +114,7 @@ docker buildx build \
|
|||||||
# (https://github.com/moby/moby/issues/41017).
|
# (https://github.com/moby/moby/issues/41017).
|
||||||
#
|
#
|
||||||
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
|
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
|
||||||
# existing bitwarden_rs tags, which adhere to the naming conventions of the
|
# existing vaultwarden tags, which adhere to the naming conventions of the
|
||||||
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
|
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
|
||||||
# Unfortunately, these per-arch repo names aren't always consistent with the
|
# Unfortunately, these per-arch repo names aren't always consistent with the
|
||||||
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
|
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
|
||||||
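The `/dev/tcp/localhost/5000` probe added above is a bashism: the redirect succeeds only if something is already listening on the registry port. The same check in Rust, as a self-contained sketch (illustration only, not part of the hooks):

```rust
use std::net::{SocketAddr, TcpStream};
use std::time::Duration;

// Rough equivalent of `timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'`:
// the registry is considered running if the port accepts a TCP connection.
fn registry_is_running() -> bool {
    let addr: SocketAddr = "127.0.0.1:5000".parse().expect("valid address");
    TcpStream::connect_timeout(&addr, Duration::from_secs(5)).is_ok()
}

fn main() {
    if !registry_is_running() {
        // The hook would run: docker run -d --name registry --network host registry:2
        println!("registry not running, would start one");
    }
}
```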
migrations/mysql/2021-04-30-233251_add_reprompt/up.sql (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE ciphers
+ADD COLUMN reprompt INTEGER;
```

migrations/mysql/2021-05-11-205202_add_hide_email/up.sql (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE sends
+ADD COLUMN hide_email BOOLEAN;
```

```diff
@@ -0,0 +1,5 @@
+ALTER TABLE organizations
+ADD COLUMN private_key TEXT;
+
+ALTER TABLE organizations
+ADD COLUMN public_key TEXT;
```

```diff
@@ -0,0 +1 @@
+DROP TABLE emergency_access;
```

```diff
@@ -0,0 +1,14 @@
+CREATE TABLE emergency_access (
+    uuid                  CHAR(36) NOT NULL PRIMARY KEY,
+    grantor_uuid          CHAR(36) REFERENCES users (uuid),
+    grantee_uuid          CHAR(36) REFERENCES users (uuid),
+    email                 VARCHAR(255),
+    key_encrypted         TEXT,
+    atype                 INTEGER NOT NULL,
+    status                INTEGER NOT NULL,
+    wait_time_days        INTEGER NOT NULL,
+    recovery_initiated_at DATETIME,
+    last_notification_at  DATETIME,
+    updated_at            DATETIME NOT NULL,
+    created_at            DATETIME NOT NULL
+);
```

```diff
@@ -0,0 +1 @@
+DROP TABLE twofactor_incomplete;
```

```diff
@@ -0,0 +1,9 @@
+CREATE TABLE twofactor_incomplete (
+    user_uuid   CHAR(36) NOT NULL REFERENCES users(uuid),
+    device_uuid CHAR(36) NOT NULL,
+    device_name TEXT NOT NULL,
+    login_time  DATETIME NOT NULL,
+    ip_address  TEXT NOT NULL,
+
+    PRIMARY KEY (user_uuid, device_uuid)
+);
```

migrations/mysql/2022-01-17-234911_add_api_key/up.sql (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE users
+ADD COLUMN api_key VARCHAR(255);
```

```diff
@@ -0,0 +1,4 @@
+-- First remove the previous primary key
+ALTER TABLE devices DROP PRIMARY KEY;
+-- Add a new combined one
+ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
```
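To make the new `emergency_access` schema easier to read, here is a hypothetical Rust struct mirroring those columns. The field names follow the SQL above; the struct itself and its comments are illustrative, not the actual vaultwarden model definition:

```rust
use chrono::NaiveDateTime;

// Hypothetical mapping of the `emergency_access` table; nullable columns
// become Option fields, DATETIME/TIMESTAMP columns become NaiveDateTime.
#[allow(dead_code)]
struct EmergencyAccess {
    uuid: String,
    grantor_uuid: Option<String>,
    grantee_uuid: Option<String>,
    email: Option<String>,
    key_encrypted: Option<String>,
    atype: i32,          // access type, e.g. view vs. takeover
    status: i32,         // invitation/confirmation state machine
    wait_time_days: i32, // delay before the grantee gains access
    recovery_initiated_at: Option<NaiveDateTime>,
    last_notification_at: Option<NaiveDateTime>,
    updated_at: NaiveDateTime,
    created_at: NaiveDateTime,
}
```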
The same migrations for PostgreSQL; note the TIMESTAMP column type, the VARCHAR(40) UUID columns in `twofactor_incomplete`, a TEXT `api_key`, and `DROP CONSTRAINT devices_pkey` in place of MySQL's `DROP PRIMARY KEY`:

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE ciphers
+ADD COLUMN reprompt INTEGER;
```

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE sends
+ADD COLUMN hide_email BOOLEAN;
```

```diff
@@ -0,0 +1,5 @@
+ALTER TABLE organizations
+ADD COLUMN private_key TEXT;
+
+ALTER TABLE organizations
+ADD COLUMN public_key TEXT;
```

```diff
@@ -0,0 +1 @@
+DROP TABLE emergency_access;
```

```diff
@@ -0,0 +1,14 @@
+CREATE TABLE emergency_access (
+    uuid                  CHAR(36) NOT NULL PRIMARY KEY,
+    grantor_uuid          CHAR(36) REFERENCES users (uuid),
+    grantee_uuid          CHAR(36) REFERENCES users (uuid),
+    email                 VARCHAR(255),
+    key_encrypted         TEXT,
+    atype                 INTEGER NOT NULL,
+    status                INTEGER NOT NULL,
+    wait_time_days        INTEGER NOT NULL,
+    recovery_initiated_at TIMESTAMP,
+    last_notification_at  TIMESTAMP,
+    updated_at            TIMESTAMP NOT NULL,
+    created_at            TIMESTAMP NOT NULL
+);
```

```diff
@@ -0,0 +1 @@
+DROP TABLE twofactor_incomplete;
```

```diff
@@ -0,0 +1,9 @@
+CREATE TABLE twofactor_incomplete (
+    user_uuid   VARCHAR(40) NOT NULL REFERENCES users(uuid),
+    device_uuid VARCHAR(40) NOT NULL,
+    device_name TEXT NOT NULL,
+    login_time  TIMESTAMP NOT NULL,
+    ip_address  TEXT NOT NULL,
+
+    PRIMARY KEY (user_uuid, device_uuid)
+);
```

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE users
+ADD COLUMN api_key TEXT;
```

```diff
@@ -0,0 +1,4 @@
+-- First remove the previous primary key
+ALTER TABLE devices DROP CONSTRAINT devices_pkey;
+-- Add a new combined one
+ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
```
migrations/sqlite/2021-04-30-233251_add_reprompt/up.sql (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE ciphers
+ADD COLUMN reprompt INTEGER;
```

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE sends
+ADD COLUMN hide_email BOOLEAN;
```

```diff
@@ -0,0 +1,5 @@
+ALTER TABLE organizations
+ADD COLUMN private_key TEXT;
+
+ALTER TABLE organizations
+ADD COLUMN public_key TEXT;
```

```diff
@@ -0,0 +1 @@
+DROP TABLE emergency_access;
```

```diff
@@ -0,0 +1,14 @@
+CREATE TABLE emergency_access (
+    uuid                  TEXT NOT NULL PRIMARY KEY,
+    grantor_uuid          TEXT REFERENCES users (uuid),
+    grantee_uuid          TEXT REFERENCES users (uuid),
+    email                 TEXT,
+    key_encrypted         TEXT,
+    atype                 INTEGER NOT NULL,
+    status                INTEGER NOT NULL,
+    wait_time_days        INTEGER NOT NULL,
+    recovery_initiated_at DATETIME,
+    last_notification_at  DATETIME,
+    updated_at            DATETIME NOT NULL,
+    created_at            DATETIME NOT NULL
+);
```

```diff
@@ -0,0 +1 @@
+DROP TABLE twofactor_incomplete;
```

```diff
@@ -0,0 +1,9 @@
+CREATE TABLE twofactor_incomplete (
+    user_uuid   TEXT NOT NULL REFERENCES users(uuid),
+    device_uuid TEXT NOT NULL,
+    device_name TEXT NOT NULL,
+    login_time  DATETIME NOT NULL,
+    ip_address  TEXT NOT NULL,
+
+    PRIMARY KEY (user_uuid, device_uuid)
+);
```

migrations/sqlite/2022-01-17-234911_add_api_key/up.sql (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+ALTER TABLE users
+ADD COLUMN api_key TEXT;
```

```diff
@@ -0,0 +1,23 @@
+-- Create new devices table with primary keys on both uuid and user_uuid
+CREATE TABLE devices_new (
+    uuid TEXT NOT NULL,
+    created_at DATETIME NOT NULL,
+    updated_at DATETIME NOT NULL,
+    user_uuid TEXT NOT NULL,
+    name TEXT NOT NULL,
+    atype INTEGER NOT NULL,
+    push_token TEXT,
+    refresh_token TEXT NOT NULL,
+    twofactor_remember TEXT,
+    PRIMARY KEY(uuid, user_uuid),
+    FOREIGN KEY(user_uuid) REFERENCES users(uuid)
+);
+
+-- Transfer current data to new table
+INSERT INTO devices_new SELECT * FROM devices;
+
+-- Drop the old table
+DROP TABLE devices;
+
+-- Rename the new table to the original name
+ALTER TABLE devices_new RENAME TO devices;
```
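SQLite cannot change a table's primary key in place, which is why this last migration rebuilds `devices` rather than issuing the `ALTER TABLE ... DROP PRIMARY KEY` used by the MySQL and PostgreSQL variants. A minimal sketch of running that rebuild as one batch, assuming the `rusqlite` crate (vaultwarden itself applies these files through its migration machinery, not like this):

```rust
use rusqlite::{Connection, Result};

// Create-copy-drop-rename: the only way to change a primary key in SQLite.
fn migrate_devices(conn: &Connection) -> Result<()> {
    conn.execute_batch(
        "BEGIN;
         CREATE TABLE devices_new (
             uuid TEXT NOT NULL,
             created_at DATETIME NOT NULL,
             updated_at DATETIME NOT NULL,
             user_uuid TEXT NOT NULL,
             name TEXT NOT NULL,
             atype INTEGER NOT NULL,
             push_token TEXT,
             refresh_token TEXT NOT NULL,
             twofactor_remember TEXT,
             PRIMARY KEY(uuid, user_uuid),
             FOREIGN KEY(user_uuid) REFERENCES users(uuid)
         );
         INSERT INTO devices_new SELECT * FROM devices;
         DROP TABLE devices;
         ALTER TABLE devices_new RENAME TO devices;
         COMMIT;",
    )
}

fn main() -> Result<()> {
    let conn = Connection::open("db.sqlite3")?; // hypothetical database path
    migrate_devices(&conn)
}
```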
- resources/vaultwarden-icon-white.svg (new file, 99 lines, 8.7 KiB; diff suppressed because one or more lines are too long)
- resources/vaultwarden-icon.svg (new file, 86 lines, 8.4 KiB; diff suppressed)
- resources/vaultwarden-logo-white.svg (new file, 271 lines, 17 KiB; diff suppressed)
- resources/vaultwarden-logo.svg (new file, 151 lines, 12 KiB; diff suppressed)
```diff
@@ -1 +1 @@
-nightly-2021-02-22
+stable
```
```diff
@@ -1,2 +1,7 @@
-version = "Two"
+# version = "Two"
+edition = "2021"
 max_width = 120
+newline_style = "Unix"
+use_small_heuristics = "Off"
+# struct_lit_single_line = false
+# overflow_delimited_expr = true
```
src/api/admin.rs (353 changed lines)
```diff
@@ -1,28 +1,32 @@
 use once_cell::sync::Lazy;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::{env, process::Command, time::Duration};
+use std::env;
 
-use reqwest::{blocking::Client, header::USER_AGENT};
+use rocket::serde::json::Json;
 use rocket::{
-    http::{Cookie, Cookies, SameSite},
-    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
-    response::{content::Html, Flash, Redirect},
+    form::Form,
+    http::{Cookie, CookieJar, SameSite, Status},
+    request::{self, FlashMessage, FromRequest, Outcome, Request},
+    response::{content::RawHtml as Html, Flash, Redirect},
     Route,
 };
-use rocket_contrib::json::Json;
 
 use crate::{
-    api::{ApiResult, EmptyResult, NumberOrString},
+    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     error::{Error, MapResult},
     mail,
-    util::{format_naive_datetime_local, get_display_size, is_running_in_docker},
-    CONFIG,
+    util::{
+        docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
+    },
+    CONFIG, VERSION,
 };
 
+use futures::{stream, stream::StreamExt};
+
 pub fn routes() -> Vec<Route> {
     if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
         return routes![admin_disabled];
@@ -31,6 +35,7 @@ pub fn routes() -> Vec<Route> {
     routes![
         admin_login,
         get_users_json,
+        get_user_json,
         post_admin_login,
         admin_page,
         invite_user,
@@ -64,23 +69,18 @@ static DB_TYPE: Lazy<&str> = Lazy::new(|| {
         .unwrap_or("Unknown")
 });
 
-static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
-    DbConnType::from_url(&CONFIG.database_url())
-        .map(|t| t == DbConnType::sqlite)
-        .unwrap_or(false)
-        && Command::new("sqlite3").arg("-version").status().is_ok()
-});
+static CAN_BACKUP: Lazy<bool> =
+    Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));
 
 #[get("/")]
 fn admin_disabled() -> &'static str {
     "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
 }
 
-const COOKIE_NAME: &str = "BWRS_ADMIN";
+const COOKIE_NAME: &str = "VW_ADMIN";
 const ADMIN_PATH: &str = "/admin";
 
 const BASE_TEMPLATE: &str = "admin/base";
-const VERSION: Option<&str> = option_env!("BWRS_VERSION");
 
 fn admin_path() -> String {
     format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
```
```diff
@@ -88,10 +88,11 @@ fn admin_path() -> String {
 
 struct Referer(Option<String>);
 
-impl<'a, 'r> FromRequest<'a, 'r> for Referer {
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for Referer {
     type Error = ();
 
-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
         Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
     }
 }
@@ -99,10 +100,11 @@ impl<'a, 'r> FromRequest<'a, 'r> for Referer {
 #[derive(Debug)]
 struct IpHeader(Option<String>);
 
-impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for IpHeader {
     type Error = ();
 
-    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         if req.headers().get_one(&CONFIG.ip_header()).is_some() {
             Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
         } else if req.headers().get_one("X-Client-IP").is_some() {
```
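These two hunks show the recurring Rocket 0.4 to 0.5 change in this comparison: request guards move from a synchronous `FromRequest<'a, 'r>` to an async `FromRequest<'r>` behind `#[rocket::async_trait]`. A self-contained sketch of the new shape, with a made-up guard rather than the project's own:

```rust
use rocket::request::{FromRequest, Outcome, Request};

// Hypothetical guard: captures the User-Agent header, never fails.
#[allow(dead_code)]
struct UserAgent(Option<String>);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for UserAgent {
    type Error = ();

    // Rocket 0.5 makes this async, so guards may now await DB calls etc.
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        Outcome::Success(UserAgent(request.headers().get_one("User-Agent").map(str::to_string)))
    }
}
```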
```diff
@@ -139,10 +141,15 @@ fn admin_url(referer: Referer) -> String {
 }
 
 #[get("/", rank = 2)]
-fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
+fn admin_login(flash: Option<FlashMessage<'_>>) -> ApiResult<Html<String>> {
     // If there is an error, show it
-    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
-    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg, "urlpath": CONFIG.domain_path()});
+    let msg = flash.map(|msg| format!("{}: {}", msg.kind(), msg.message()));
+    let json = json!({
+        "page_content": "admin/login",
+        "version": VERSION,
+        "error": msg,
+        "urlpath": CONFIG.domain_path()
+    });
 
     // Return the page
     let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -157,19 +164,20 @@ struct LoginForm {
 #[post("/", data = "<data>")]
 fn post_admin_login(
     data: Form<LoginForm>,
-    mut cookies: Cookies,
+    cookies: &CookieJar<'_>,
     ip: ClientIp,
     referer: Referer,
 ) -> Result<Redirect, Flash<Redirect>> {
     let data = data.into_inner();
 
+    if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
+        return Err(Flash::error(Redirect::to(admin_url(referer)), "Too many requests, try again later."));
+    }
+
     // If the token is invalid, redirect to login page
     if !_validate_token(&data.token) {
         error!("Invalid admin token. IP: {}", ip.ip);
-        Err(Flash::error(
-            Redirect::to(admin_url(referer)),
-            "Invalid admin token, please try again.",
-        ))
+        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
     } else {
         // If the token received is valid, generate JWT and save it as a cookie
         let claims = generate_admin_claims();
@@ -177,7 +185,7 @@ fn post_admin_login(
 
     let cookie = Cookie::build(COOKIE_NAME, jwt)
         .path(admin_path())
-        .max_age(time::Duration::minutes(20))
+        .max_age(rocket::time::Duration::minutes(20))
         .same_site(SameSite::Strict)
         .http_only(true)
         .finish();
```
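`crate::ratelimit::check_limit_admin` is defined outside this excerpt, so its internals aren't shown here; the idea is simply to cap admin login attempts per client IP before the token is even validated. A hypothetical in-memory version of that idea, for illustration only:

```rust
use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::Mutex;
use std::time::{Duration, Instant};

// Not the actual ratelimit module: a fixed-window counter per IP.
struct AdminLimiter {
    window: Duration,
    max_hits: u32,
    hits: Mutex<HashMap<IpAddr, (Instant, u32)>>,
}

impl AdminLimiter {
    fn check(&self, ip: IpAddr) -> Result<(), ()> {
        let mut hits = self.hits.lock().unwrap();
        let now = Instant::now();
        let entry = hits.entry(ip).or_insert((now, 0));
        if now.duration_since(entry.0) > self.window {
            *entry = (now, 0); // window expired: reset the counter
        }
        entry.1 += 1;
        if entry.1 > self.max_hits {
            Err(()) // too many attempts in this window
        } else {
            Ok(())
        }
    }
}

fn main() {
    let limiter = AdminLimiter {
        window: Duration::from_secs(60),
        max_hits: 3,
        hits: Mutex::new(HashMap::new()),
    };
    let ip: IpAddr = "127.0.0.1".parse().unwrap();
    assert!(limiter.check(ip).is_ok()); // first attempts pass
}
```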
```diff
@@ -198,9 +206,7 @@ fn _validate_token(token: &str) -> bool {
 struct AdminTemplateData {
     page_content: String,
     version: Option<&'static str>,
-    users: Option<Vec<Value>>,
-    organizations: Option<Vec<Value>>,
-    diagnostics: Option<Value>,
+    page_data: Option<Value>,
     config: Value,
     can_backup: bool,
     logged_in: bool,
@@ -216,51 +222,19 @@ impl AdminTemplateData {
             can_backup: *CAN_BACKUP,
             logged_in: true,
             urlpath: CONFIG.domain_path(),
-            users: None,
-            organizations: None,
-            diagnostics: None,
+            page_data: None,
         }
     }
 
-    fn users(users: Vec<Value>) -> Self {
+    fn with_data(page_content: &str, page_data: Value) -> Self {
         Self {
-            page_content: String::from("admin/users"),
+            page_content: String::from(page_content),
             version: VERSION,
-            users: Some(users),
+            page_data: Some(page_data),
             config: CONFIG.prepare_json(),
             can_backup: *CAN_BACKUP,
             logged_in: true,
             urlpath: CONFIG.domain_path(),
-            organizations: None,
-            diagnostics: None,
-        }
-    }
-
-    fn organizations(organizations: Vec<Value>) -> Self {
-        Self {
-            page_content: String::from("admin/organizations"),
-            version: VERSION,
-            organizations: Some(organizations),
-            config: CONFIG.prepare_json(),
-            can_backup: *CAN_BACKUP,
-            logged_in: true,
-            urlpath: CONFIG.domain_path(),
-            users: None,
-            diagnostics: None,
-        }
-    }
-
-    fn diagnostics(diagnostics: Value) -> Self {
-        Self {
-            page_content: String::from("admin/diagnostics"),
-            version: VERSION,
-            organizations: None,
-            config: CONFIG.prepare_json(),
-            can_backup: *CAN_BACKUP,
-            logged_in: true,
-            urlpath: CONFIG.domain_path(),
-            users: None,
-            diagnostics: Some(diagnostics),
         }
     }
 }
@@ -270,7 +244,7 @@ impl AdminTemplateData {
 }
 
 #[get("/", rank = 1)]
-fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
+fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
     let text = AdminTemplateData::new().render()?;
     Ok(Html(text))
 }
```
```diff
@@ -281,25 +255,39 @@ struct InviteData {
     email: String,
 }
 
+async fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
+    if let Some(user) = User::find_by_uuid(uuid, conn).await {
+        Ok(user)
+    } else {
+        err_code!("User doesn't exist", Status::NotFound.code);
+    }
+}
+
 #[post("/invite", data = "<data>")]
-fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
+async fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
     let email = data.email.clone();
-    if User::find_by_mail(&data.email, &conn).is_some() {
-        err!("User already exists")
+    if User::find_by_mail(&data.email, &conn).await.is_some() {
+        err_code!("User already exists", Status::Conflict.code)
     }
 
     let mut user = User::new(email);
-    user.save(&conn)?;
 
+    async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult {
         if CONFIG.mail_enabled() {
             mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)
         } else {
-            let invitation = Invitation::new(data.email);
-            invitation.save(&conn)
+            let invitation = Invitation::new(user.email.clone());
+            invitation.save(conn).await
         }
     }
+
+    _generate_invite(&user, &conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
+    user.save(&conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
+
+    Ok(Json(user.to_json(&conn).await))
+}
 
 #[post("/test/smtp", data = "<data>")]
 fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
     let data: InviteData = data.into_inner();
```
```diff
@@ -312,82 +300,96 @@ fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
 }
 
 #[get("/logout")]
-fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
-    cookies.remove(Cookie::named(COOKIE_NAME));
+fn logout(cookies: &CookieJar<'_>, referer: Referer) -> Redirect {
+    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
     Redirect::to(admin_url(referer))
 }
 
 #[get("/users")]
-fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
-    let users = User::get_all(&conn);
-    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
+async fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
+    let users_json = stream::iter(User::get_all(&conn).await)
+        .then(|u| async {
+            let u = u; // Move out this single variable
+            u.to_json(&conn).await
+        })
+        .collect::<Vec<Value>>()
+        .await;
 
     Json(Value::Array(users_json))
 }
 
 #[get("/users/overview")]
-fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
-    let users = User::get_all(&conn);
-    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
-    let users_json: Vec<Value> = users.iter()
-        .map(|u| {
-            let mut usr = u.to_json(&conn);
-            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
-            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
-            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
+async fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
+    const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";
+
+    let users_json = stream::iter(User::get_all(&conn).await)
+        .then(|u| async {
+            let u = u; // Move out this single variable
+            let mut usr = u.to_json(&conn).await;
+            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn).await);
+            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn).await);
+            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn).await as i32));
             usr["user_enabled"] = json!(u.enabled);
-            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
-            usr["last_active"] = match u.last_active(&conn) {
-                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
-                None => json!("Never")
+            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+            usr["last_active"] = match u.last_active(&conn).await {
+                Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
+                None => json!("Never"),
             };
             usr
         })
-        .collect();
+        .collect::<Vec<Value>>()
+        .await;
 
-    let text = AdminTemplateData::users(users_json).render()?;
+    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
     Ok(Html(text))
 }
 
+#[get("/users/<uuid>")]
+async fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
+    let user = get_user_or_404(&uuid, &conn).await?;
+
+    Ok(Json(user.to_json(&conn).await))
+}
+
 #[post("/users/<uuid>/delete")]
-fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
-    user.delete(&conn)
+async fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let user = get_user_or_404(&uuid, &conn).await?;
+    user.delete(&conn).await
 }
 
 #[post("/users/<uuid>/deauth")]
-fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
-    Device::delete_all_by_user(&user.uuid, &conn)?;
+async fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&uuid, &conn).await?;
+    Device::delete_all_by_user(&user.uuid, &conn).await?;
     user.reset_security_stamp();
 
-    user.save(&conn)
+    user.save(&conn).await
 }
 
 #[post("/users/<uuid>/disable")]
-fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
-    Device::delete_all_by_user(&user.uuid, &conn)?;
+async fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&uuid, &conn).await?;
+    Device::delete_all_by_user(&user.uuid, &conn).await?;
     user.reset_security_stamp();
     user.enabled = false;
 
-    user.save(&conn)
+    user.save(&conn).await
 }
 
 #[post("/users/<uuid>/enable")]
-fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+async fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&uuid, &conn).await?;
    user.enabled = true;
 
-    user.save(&conn)
+    user.save(&conn).await
 }
 
 #[post("/users/<uuid>/remove-2fa")]
-fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
-    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
+async fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&uuid, &conn).await?;
+    TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
     user.totp_recover = None;
-    user.save(&conn)
+    user.save(&conn).await
 }
 
 #[derive(Deserialize, Debug)]
```
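The handlers above all switch from a synchronous `iter().map().collect()` to `stream::iter(..).then(..).collect().await`, because `to_json` and the count queries are now async and a plain `Iterator::map` closure cannot await them. A self-contained sketch of the pattern, assuming the `futures` and `tokio` crates:

```rust
use futures::{stream, StreamExt};

// Stand-in for an async per-item call such as `u.to_json(&conn).await`.
async fn enrich(n: u32) -> u32 {
    n * 2
}

#[tokio::main]
async fn main() {
    // then() maps each item through an async closure, one at a time,
    // preserving order; collect() gathers the awaited results.
    let out: Vec<u32> = stream::iter(vec![1, 2, 3])
        .then(|n| async move { enrich(n).await })
        .collect::<Vec<u32>>()
        .await;

    assert_eq!(out, vec![2, 4, 6]);
}
```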
```diff
@@ -398,10 +400,10 @@ struct UserOrgTypeData {
 }
 
 #[post("/users/org_type", data = "<data>")]
-fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
+async fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
     let data: UserOrgTypeData = data.into_inner();
 
-    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
+    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn).await {
         Some(user) => user,
         None => err!("The specified user isn't member of the organization"),
     };
@@ -413,7 +415,8 @@ fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: D
 
     if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
         // Removing owner permmission, check that there are at least another owner
-        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
+        let num_owners =
+            UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).await.len();
 
         if num_owners <= 1 {
             err!("Can't change the type of the last owner")
@@ -421,37 +424,37 @@ fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: D
     }
 
     user_to_edit.atype = new_type as i32;
-    user_to_edit.save(&conn)
+    user_to_edit.save(&conn).await
 }
 
 #[post("/users/update_revision")]
-fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
-    User::update_all_revisions(&conn)
+async fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
+    User::update_all_revisions(&conn).await
 }
 
 #[get("/organizations/overview")]
-fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
-    let organizations = Organization::get_all(&conn);
-    let organizations_json: Vec<Value> = organizations.iter()
-        .map(|o| {
+async fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
+    let organizations_json = stream::iter(Organization::get_all(&conn).await)
+        .then(|o| async {
+            let o = o; //Move out this single variable
             let mut org = o.to_json();
-            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
-            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
-            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
-            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
+            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn).await);
+            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn).await);
+            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn).await);
+            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn).await as i32));
             org
         })
-        .collect();
+        .collect::<Vec<Value>>()
+        .await;
 
-    let text = AdminTemplateData::organizations(organizations_json).render()?;
+    let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
     Ok(Html(text))
 }
 
 #[post("/organizations/<uuid>/delete")]
-fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
-    org.delete(&conn)
+async fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(&uuid, &conn).await.map_res("Organization doesn't exist")?;
+    org.delete(&conn).await
 }
 
 #[derive(Deserialize)]
```
```diff
@@ -469,46 +472,42 @@ struct GitCommit {
     sha: String,
 }
 
-fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    let github_api = Client::builder().build()?;
+async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
+    let github_api = get_reqwest_client();
 
-    Ok(github_api
-        .get(url)
-        .timeout(Duration::from_secs(10))
-        .header(USER_AGENT, "Bitwarden_RS")
-        .send()?
-        .error_for_status()?
-        .json::<T>()?)
+    Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
 }
 
-fn has_http_access() -> bool {
-    let http_access = Client::builder().build().unwrap();
+async fn has_http_access() -> bool {
+    let http_access = get_reqwest_client();
 
-    match http_access
-        .head("https://github.com/dani-garcia/bitwarden_rs")
-        .timeout(Duration::from_secs(10))
-        .header(USER_AGENT, "Bitwarden_RS")
-        .send()
-    {
+    match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
         Ok(r) => r.status().is_success(),
         _ => false,
     }
 }
 
 #[get("/diagnostics")]
-fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
+async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
     use crate::util::read_file_string;
     use chrono::prelude::*;
     use std::net::ToSocketAddrs;
 
     // Get current running versions
-    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
-    let vault_version_str = read_file_string(&vault_version_path)?;
-    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
+    let web_vault_version: WebVaultVersion =
+        match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
+            Ok(s) => serde_json::from_str(&s)?,
+            _ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
+                Ok(s) => serde_json::from_str(&s)?,
+                _ => WebVaultVersion {
+                    version: String::from("Version file missing"),
+                },
+            },
+        };
 
     // Execute some environment checks
     let running_within_docker = is_running_in_docker();
-    let has_http_access = has_http_access();
+    let has_http_access = has_http_access().await;
     let uses_proxy = env::var_os("HTTP_PROXY").is_some()
         || env::var_os("http_proxy").is_some()
         || env::var_os("HTTPS_PROXY").is_some()
```
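`get_reqwest_client()` replaces the per-call `Client::builder()` setup, so the 10-second timeout and the User-Agent header that the old code repeated at each call site are configured once. A plausible sketch of such a helper; the real one lives in `util.rs`, outside this diff, so names and details here are assumptions:

```rust
use once_cell::sync::Lazy;
use reqwest::Client;
use std::time::Duration;

// One shared client, built lazily on first use; connection pooling comes
// for free because reqwest::Client is reference-counted internally.
static CLIENT: Lazy<Client> = Lazy::new(|| {
    Client::builder()
        .timeout(Duration::from_secs(10))
        .user_agent("Vaultwarden")
        .build()
        .expect("failed to build reqwest client")
});

pub fn get_reqwest_client() -> &'static Client {
    &CLIENT
}
```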
```diff
@@ -524,11 +523,14 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
     // TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already.
     let (latest_release, latest_commit, latest_web_build) = if has_http_access {
         (
-            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
+            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
+                .await
+            {
                 Ok(r) => r.tag_name,
                 _ => "-".to_string(),
             },
-            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
+            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
+            {
                 Ok(mut c) => {
                     c.sha.truncate(8);
                     c.sha
@@ -540,7 +542,11 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
             if running_within_docker {
                 "-".to_string()
             } else {
-                match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
+                match get_github_api::<GitRelease>(
+                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
+                )
+                .await
+                {
                     Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
                     _ => "-".to_string(),
                 }
@@ -552,16 +558,18 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
 
     let ip_header_name = match &ip_header.0 {
         Some(h) => h,
-        _ => ""
+        _ => "",
     };
 
     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
-        "web_vault_version": web_vault_version.version,
         "latest_release": latest_release,
         "latest_commit": latest_commit,
+        "web_vault_enabled": &CONFIG.web_vault_enabled(),
+        "web_vault_version": web_vault_version.version,
         "latest_web_build": latest_web_build,
         "running_within_docker": running_within_docker,
+        "docker_base_image": docker_base_image(),
         "has_http_access": has_http_access,
         "ip_header_exists": &ip_header.0.is_some(),
         "ip_header_match": ip_header_name == CONFIG.ip_header(),
@@ -569,12 +577,14 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,
         "db_type": *DB_TYPE,
-        "db_version": get_sql_server_version(&conn),
+        "db_version": get_sql_server_version(&conn).await,
         "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
+        "overrides": &CONFIG.get_overrides().join(", "),
+        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
         "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
     });
 
-    let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
+    let text = AdminTemplateData::with_data("admin/diagnostics", diagnostics_json).render()?;
     Ok(Html(text))
 }
 
```
```diff
@@ -596,38 +606,39 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
 }
 
 #[post("/config/backup_db")]
-fn backup_db(_token: AdminToken) -> EmptyResult {
+async fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
     if *CAN_BACKUP {
-        backup_database()
+        backup_database(&conn).await
     } else {
-        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
+        err!("Can't back up current DB (Only SQLite supports this feature)");
     }
 }
 
 pub struct AdminToken {}
 
-impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for AdminToken {
     type Error = &'static str;
 
-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
         if CONFIG.disable_admin_token() {
             Outcome::Success(AdminToken {})
         } else {
-            let mut cookies = request.cookies();
+            let cookies = request.cookies();
 
             let access_token = match cookies.get(COOKIE_NAME) {
                 Some(cookie) => cookie.value(),
                 None => return Outcome::Forward(()), // If there is no cookie, redirect to login
             };
 
-            let ip = match request.guard::<ClientIp>() {
+            let ip = match ClientIp::from_request(request).await {
                 Outcome::Success(ip) => ip.ip,
                 _ => err_handler!("Error getting Client IP"),
             };
 
             if decode_admin(access_token).is_err() {
                 // Remove admin cookie
-                cookies.remove(Cookie::named(COOKIE_NAME));
+                cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
                 error!("Invalid or expired admin JWT. IP: {}.", ip);
                 return Outcome::Forward(());
             }
```
```diff
@@ -1,5 +1,5 @@
 use chrono::Utc;
-use rocket_contrib::json::Json;
+use rocket::serde::json::Json;
 use serde_json::Value;
 
 use crate::{
@@ -34,6 +34,8 @@ pub fn routes() -> Vec<rocket::Route> {
         password_hint,
         prelogin,
         verify_password,
+        api_key,
+        rotate_api_key,
     ]
 }
 
@@ -49,6 +51,7 @@ struct RegisterData {
     MasterPasswordHint: Option<String>,
     Name: Option<String>,
     Token: Option<String>,
+    #[allow(dead_code)]
     OrganizationUserId: Option<String>,
 }
 
```
```diff
@@ -60,13 +63,14 @@ struct KeysData {
 }
 
 #[post("/accounts/register", data = "<data>")]
-fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
+async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
     let data: RegisterData = data.into_inner().data;
+    let email = data.Email.to_lowercase();
 
-    let mut user = match User::find_by_mail(&data.Email, &conn) {
+    let mut user = match User::find_by_mail(&email, &conn).await {
         Some(user) => {
             if !user.password_hash.is_empty() {
-                if CONFIG.is_signup_allowed(&data.Email) {
+                if CONFIG.is_signup_allowed(&email) {
                     err!("User already exists")
                 } else {
                     err!("Registration not allowed or user already exists")
@@ -75,19 +79,20 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
 
             if let Some(token) = data.Token {
                 let claims = decode_invite(&token)?;
-                if claims.email == data.Email {
+                if claims.email == email {
                     user
                 } else {
                     err!("Registration email does not match invite email")
                 }
-            } else if Invitation::take(&data.Email, &conn) {
-                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
+            } else if Invitation::take(&email, &conn).await {
+                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).await.iter_mut() {
                     user_org.status = UserOrgStatus::Accepted as i32;
-                    user_org.save(&conn)?;
+                    user_org.save(&conn).await?;
                 }
 
                 user
-            } else if CONFIG.is_signup_allowed(&data.Email) {
+            } else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).await.is_some() {
+                user
+            } else if CONFIG.is_signup_allowed(&email) {
                 err!("Account with this email already exists")
             } else {
                 err!("Registration not allowed or user already exists")
@@ -95,10 +100,10 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
             }
         }
         None => {
             // Order is important here; the invitation check must come first
-            // because the bitwarden_rs admin can invite anyone, regardless
+            // because the vaultwarden admin can invite anyone, regardless
             // of other signup restrictions.
-            if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
-                User::new(data.Email.clone())
+            if Invitation::take(&email, &conn).await || CONFIG.is_signup_allowed(&email) {
+                User::new(email.clone())
             } else {
                 err!("Registration not allowed or user already exists")
             }
@@ -106,7 +111,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
     };
 
     // Make sure we don't leave a lingering invitation.
-    Invitation::take(&data.Email, &conn);
+    Invitation::take(&email, &conn).await;
 
     if let Some(client_kdf_iter) = data.KdfIterations {
         user.client_kdf_iter = client_kdf_iter;
```
```diff
@@ -145,12 +150,12 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         }
     }
 
-    user.save(&conn)
+    user.save(&conn).await
 }
 
 #[get("/accounts/profile")]
-fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
-    Json(headers.user.to_json(&conn))
+async fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
+    Json(headers.user.to_json(&conn).await)
 }
 
 #[derive(Deserialize, Debug)]
@@ -163,12 +168,12 @@ struct ProfileData {
 }
 
 #[put("/accounts/profile", data = "<data>")]
-fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_profile(data, headers, conn)
+async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_profile(data, headers, conn).await
 }
 
 #[post("/accounts/profile", data = "<data>")]
-fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: ProfileData = data.into_inner().data;
 
     let mut user = headers.user;
@@ -178,13 +183,13 @@ fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -
         Some(ref h) if h.is_empty() => None,
         _ => data.MasterPasswordHint,
     };
-    user.save(&conn)?;
-    Ok(Json(user.to_json(&conn)))
+    user.save(&conn).await?;
+    Ok(Json(user.to_json(&conn).await))
 }
 
 #[get("/users/<uuid>/public-key")]
-fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
-    let user = match User::find_by_uuid(&uuid, &conn) {
+async fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
+    let user = match User::find_by_uuid(&uuid, &conn).await {
         Some(user) => user,
         None => err!("User doesn't exist"),
     };
@@ -197,7 +202,7 @@ fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult
 }
 
 #[post("/accounts/keys", data = "<data>")]
-fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: KeysData = data.into_inner().data;
 
     let mut user = headers.user;
@@ -205,7 +210,7 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
     user.private_key = Some(data.EncryptedPrivateKey);
     user.public_key = Some(data.PublicKey);
 
-    user.save(&conn)?;
+    user.save(&conn).await?;
 
     Ok(Json(json!({
         "PrivateKey": user.private_key,
@@ -223,7 +228,7 @@ struct ChangePassData {
 }
 
 #[post("/accounts/password", data = "<data>")]
-fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: ChangePassData = data.into_inner().data;
     let mut user = headers.user;
 
```
@@ -231,9 +236,12 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
|
|||||||
err!("Invalid password")
|
err!("Invalid password")
|
||||||
}
|
}
|
||||||
|
|
||||||
user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
|
user.set_password(
|
||||||
|
&data.NewMasterPasswordHash,
|
||||||
|
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
|
||||||
|
);
|
||||||
user.akey = data.Key;
|
user.akey = data.Key;
|
||||||
user.save(&conn)
|
user.save(&conn).await
|
||||||
}
|
}
|
||||||

 #[derive(Deserialize)]
@@ -248,7 +256,7 @@ struct ChangeKdfData {
 }

 #[post("/accounts/kdf", data = "<data>")]
-fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: ChangeKdfData = data.into_inner().data;
     let mut user = headers.user;

@@ -260,7 +268,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
     user.client_kdf_type = data.Kdf;
     user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;
-    user.save(&conn)
+    user.save(&conn).await
 }

 #[derive(Deserialize)]
@@ -283,7 +291,7 @@ struct KeyData {
 }

 #[post("/accounts/key", data = "<data>")]
-fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
     let data: KeyData = data.into_inner().data;

     if !headers.user.check_valid_password(&data.MasterPasswordHash) {
@@ -294,7 +302,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:

     // Update folder data
     for folder_data in data.Folders {
-        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn) {
+        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn).await {
             Some(folder) => folder,
             None => err!("Folder doesn't exist"),
         };
@@ -304,14 +312,14 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
         }

         saved_folder.name = folder_data.Name;
-        saved_folder.save(&conn)?
+        saved_folder.save(&conn).await?
     }

     // Update cipher data
     use super::ciphers::update_cipher_from_data;

     for cipher_data in data.Ciphers {
-        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn) {
+        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn).await {
             Some(cipher) => cipher,
             None => err!("Cipher doesn't exist"),
         };
@@ -320,15 +328,9 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
             err!("The cipher is not owned by the user")
         }

-        update_cipher_from_data(
-            &mut saved_cipher,
-            cipher_data,
-            &headers,
-            false,
-            &conn,
-            &nt,
-            UpdateType::CipherUpdate,
-        )?
+        // Prevent triggering cipher updates via WebSockets by setting UpdateType::None
+        // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
+        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None).await?
     }

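Passing `UpdateType::None` makes `update_cipher_from_data` skip the WebSocket push, as the new comments explain: every cipher was just re-encrypted and the affected sessions are about to be invalidated anyway. A toy sketch of a notifier that treats `None` as "send nothing" (any enum variants and methods beyond those visible in the diff are assumptions):

// Minimal sketch: a notifier that treats UpdateType::None as "do not send".
#[derive(Debug, PartialEq)]
enum UpdateType {
    CipherUpdate,
    None,
}

struct Notify;

impl Notify {
    fn send_cipher_update(&self, ut: UpdateType, cipher_uuid: &str) {
        if ut == UpdateType::None {
            return; // key rotation: sessions are being invalidated, stay quiet
        }
        println!("websocket push: {:?} for {}", ut, cipher_uuid);
    }
}

fn main() {
    let nt = Notify;
    nt.send_cipher_update(UpdateType::CipherUpdate, "abc"); // pushes
    nt.send_cipher_update(UpdateType::None, "abc"); // silent
}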
     // Update user data
@@ -337,13 +339,12 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
     user.akey = data.Key;
     user.private_key = Some(data.PrivateKey);
     user.reset_security_stamp();
-    user.reset_stamp_exception();

-    user.save(&conn)
+    user.save(&conn).await
 }

 #[post("/accounts/security-stamp", data = "<data>")]
-fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let mut user = headers.user;

@@ -351,9 +352,9 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
         err!("Invalid password")
     }

-    Device::delete_all_by_user(&user.uuid, &conn)?;
+    Device::delete_all_by_user(&user.uuid, &conn).await?;
     user.reset_security_stamp();
-    user.save(&conn)
+    user.save(&conn).await
 }

 #[derive(Deserialize)]
@@ -364,7 +365,7 @@ struct EmailTokenData {
 }

 #[post("/accounts/email-token", data = "<data>")]
-fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: EmailTokenData = data.into_inner().data;
     let mut user = headers.user;

@@ -372,7 +373,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
         err!("Invalid password")
     }

-    if User::find_by_mail(&data.NewEmail, &conn).is_some() {
+    if User::find_by_mail(&data.NewEmail, &conn).await.is_some() {
         err!("Email already in use");
     }

@@ -380,7 +381,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
         err!("Email domain not allowed");
     }

-    let token = crypto::generate_token(6)?;
+    let token = crypto::generate_email_token(6);

     if CONFIG.mail_enabled() {
         if let Err(e) = mail::send_change_email(&data.NewEmail, &token) {
@@ -390,7 +391,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db

     user.email_new = Some(data.NewEmail);
     user.email_new_token = Some(token);
-    user.save(&conn)
+    user.save(&conn).await
 }

 #[derive(Deserialize)]
@@ -405,7 +406,7 @@ struct ChangeEmailData {
 }

 #[post("/accounts/email", data = "<data>")]
-fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: ChangeEmailData = data.into_inner().data;
     let mut user = headers.user;

@@ -413,7 +414,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
         err!("Invalid password")
     }

-    if User::find_by_mail(&data.NewEmail, &conn).is_some() {
+    if User::find_by_mail(&data.NewEmail, &conn).await.is_some() {
         err!("Email already in use");
     }

@@ -448,11 +449,11 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;

-    user.save(&conn)
+    user.save(&conn).await
 }

 #[post("/accounts/verify-email")]
-fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
+fn post_verify_email(headers: Headers) -> EmptyResult {
     let user = headers.user;

     if !CONFIG.mail_enabled() {
@@ -474,10 +475,10 @@ struct VerifyEmailTokenData {
 }

 #[post("/accounts/verify-email-token", data = "<data>")]
-fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
+async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
     let data: VerifyEmailTokenData = data.into_inner().data;

-    let mut user = match User::find_by_uuid(&data.UserId, &conn) {
+    let mut user = match User::find_by_uuid(&data.UserId, &conn).await {
         Some(user) => user,
         None => err!("User doesn't exist"),
     };
@@ -492,7 +493,7 @@ fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn)
     user.verified_at = Some(Utc::now().naive_utc());
     user.last_verifying_at = None;
     user.login_verify_count = 0;
-    if let Err(e) = user.save(&conn) {
+    if let Err(e) = user.save(&conn).await {
         error!("Error saving email verification: {:#?}", e);
     }

@@ -506,13 +507,11 @@ struct DeleteRecoverData {
 }

 #[post("/accounts/delete-recover", data = "<data>")]
-fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
+async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
     let data: DeleteRecoverData = data.into_inner().data;

-    let user = User::find_by_mail(&data.Email, &conn);
-
     if CONFIG.mail_enabled() {
-        if let Some(user) = user {
+        if let Some(user) = User::find_by_mail(&data.Email, &conn).await {
             if let Err(e) = mail::send_delete_account(&user.email, &user.uuid) {
                 error!("Error sending delete account email: {:#?}", e);
             }
@@ -535,10 +534,10 @@ struct DeleteRecoverTokenData {
 }

 #[post("/accounts/delete-recover-token", data = "<data>")]
-fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
+async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
     let data: DeleteRecoverTokenData = data.into_inner().data;

-    let user = match User::find_by_uuid(&data.UserId, &conn) {
+    let user = match User::find_by_uuid(&data.UserId, &conn).await {
         Some(user) => user,
         None => err!("User doesn't exist"),
     };
@@ -550,16 +549,16 @@ fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbC
     if claims.sub != user.uuid {
         err!("Invalid claim");
     }
-    user.delete(&conn)
+    user.delete(&conn).await
 }

 #[post("/accounts/delete", data = "<data>")]
-fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
-    delete_account(data, headers, conn)
+async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+    delete_account(data, headers, conn).await
 }

 #[delete("/accounts", data = "<data>")]
-fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let user = headers.user;

@@ -567,7 +566,7 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
         err!("Invalid password")
     }

-    user.delete(&conn)
+    user.delete(&conn).await
 }

 #[get("/accounts/revision-date")]
@@ -583,38 +582,62 @@ struct PasswordHintData {
 }

 #[post("/accounts/password-hint", data = "<data>")]
-fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
-    let data: PasswordHintData = data.into_inner().data;
-    let hint = match User::find_by_mail(&data.Email, &conn) {
-        Some(user) => user.password_hint,
-        None => return Ok(()),
-    };
-
-    if CONFIG.mail_enabled() {
-        mail::send_password_hint(&data.Email, hint)?;
-    } else if CONFIG.show_password_hint() {
-        if let Some(hint) = hint {
-            err!(format!("Your password hint is: {}", &hint));
-        } else {
-            err!("Sorry, you have no password hint...");
-        }
-    }
-
-    Ok(())
-}
+async fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
+        err!("This server is not configured to provide password hints.");
+    }
+
+    const NO_HINT: &str = "Sorry, you have no password hint...";
+
+    let data: PasswordHintData = data.into_inner().data;
+    let email = &data.Email;
+
+    match User::find_by_mail(email, &conn).await {
+        None => {
+            // To prevent user enumeration, act as if the user exists.
+            if CONFIG.mail_enabled() {
+                // There is still a timing side channel here in that the code
+                // paths that send mail take noticeably longer than ones that
+                // don't. Add a randomized sleep to mitigate this somewhat.
+                use rand::{rngs::SmallRng, Rng, SeedableRng};
+                let mut rng = SmallRng::from_entropy();
+                let delta: i32 = 100;
+                let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
+                tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
+                Ok(())
+            } else {
+                err!(NO_HINT);
+            }
+        }
+        Some(user) => {
+            let hint: Option<String> = user.password_hint;
+            if CONFIG.mail_enabled() {
+                mail::send_password_hint(email, hint)?;
+                Ok(())
+            } else if let Some(hint) = hint {
+                err!(format!("Your password hint is: {}", hint));
+            } else {
+                err!(NO_HINT);
+            }
+        }
+    }
+}
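The rewritten `password_hint` deliberately spends time on the unknown-address branch. The sketch below isolates the idea: the mail-sending path costs real wall-clock time, so the no-such-user path sleeps roughly one second with random jitter instead of returning instantly (durations here are invented for illustration; the `rand` crate is used as in the diff):

// Standalone illustration of the timing side channel and its mitigation.
use rand::Rng;
use std::time::{Duration, Instant};

fn handle(email_exists: bool, add_jitter: bool) -> Duration {
    let start = Instant::now();
    if email_exists {
        std::thread::sleep(Duration::from_millis(30)); // stand-in for SMTP work
    } else if add_jitter {
        // Mirror the diff: 1_000 ms plus or minus a random delta of 100 ms.
        let ms = 1_000 + rand::thread_rng().gen_range(-100..=100);
        std::thread::sleep(Duration::from_millis(ms as u64));
    }
    start.elapsed()
}

fn main() {
    println!("exists, no jitter:  {:?}", handle(true, false));
    println!("unknown, no jitter: {:?}", handle(false, false)); // near zero: enumerable
    println!("unknown, jitter:    {:?}", handle(false, true));
}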

 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-struct PreloginData {
+pub struct PreloginData {
     Email: String,
 }

 #[post("/accounts/prelogin", data = "<data>")]
-fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+    _prelogin(data, conn).await
+}
+
+pub async fn _prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
     let data: PreloginData = data.into_inner().data;

-    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
+    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn).await {
         Some(user) => (user.client_kdf_type, user.client_kdf_iter),
         None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
     };
@@ -624,15 +647,17 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
         "KdfIterations": kdf_iter
     }))
 }

+// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-struct VerifyPasswordData {
+struct SecretVerificationRequest {
     MasterPasswordHash: String,
 }

 #[post("/accounts/verify-password", data = "<data>")]
-fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
-    let data: VerifyPasswordData = data.into_inner().data;
+fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers) -> EmptyResult {
+    let data: SecretVerificationRequest = data.into_inner().data;
     let user = headers.user;

     if !user.check_valid_password(&data.MasterPasswordHash) {
@@ -641,3 +666,37 @@ fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn

     Ok(())
 }
+
+async fn _api_key(
+    data: JsonUpcase<SecretVerificationRequest>,
+    rotate: bool,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    let data: SecretVerificationRequest = data.into_inner().data;
+    let mut user = headers.user;
+
+    if !user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password")
+    }
+
+    if rotate || user.api_key.is_none() {
+        user.api_key = Some(crypto::generate_api_key());
+        user.save(&conn).await.expect("Error saving API key");
+    }
+
+    Ok(Json(json!({
+        "ApiKey": user.api_key,
+        "Object": "apiKey",
+    })))
+}
+
+#[post("/accounts/api-key", data = "<data>")]
+async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+    _api_key(data, false, headers, conn).await
+}
+
+#[post("/accounts/rotate-api-key", data = "<data>")]
+async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+    _api_key(data, true, headers, conn).await
+}
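`api_key` and `rotate_api_key` are thin wrappers around `_api_key`, differing only in the `rotate` flag: rotation always replaces the stored key, while a plain request generates one only on first use. The flag logic in isolation, with a dummy generator standing in for `crypto::generate_api_key`:

// Sketch of the lazy-generate-or-rotate logic shared by both endpoints.
fn generate_api_key() -> String {
    "new-key".to_string() // the real helper returns a random string
}

fn api_key(stored: &mut Option<String>, rotate: bool) -> String {
    if rotate || stored.is_none() {
        *stored = Some(generate_api_key());
    }
    stored.clone().unwrap()
}

fn main() {
    let mut key = None;
    let first = api_key(&mut key, false); // generated on first request
    let same = api_key(&mut key, false); // unchanged on later requests
    assert_eq!(first, same);
    let _rotated = api_key(&mut key, true); // forced replacement
}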
File diff suppressed because it is too large (Load Diff)

src/api/core/emergency_access.rs | 837 (new file)
@@ -0,0 +1,837 @@
+use chrono::{Duration, Utc};
+use rocket::serde::json::Json;
+use rocket::Route;
+use serde_json::Value;
+use std::borrow::Borrow;
+
+use crate::{
+    api::{core::CipherSyncData, EmptyResult, JsonResult, JsonUpcase, NumberOrString},
+    auth::{decode_emergency_access_invite, Headers},
+    db::{models::*, DbConn, DbPool},
+    mail, CONFIG,
+};
+
+use futures::{stream, stream::StreamExt};
+
+pub fn routes() -> Vec<Route> {
+    routes![
+        get_contacts,
+        get_grantees,
+        get_emergency_access,
+        put_emergency_access,
+        delete_emergency_access,
+        post_delete_emergency_access,
+        send_invite,
+        resend_invite,
+        accept_invite,
+        confirm_emergency_access,
+        initiate_emergency_access,
+        approve_emergency_access,
+        reject_emergency_access,
+        takeover_emergency_access,
+        password_emergency_access,
+        view_emergency_access,
+        policies_emergency_access,
+    ]
+}
+
+// region get
+
+#[get("/emergency-access/trusted")]
+async fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let emergency_access_list_json =
+        stream::iter(EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn).await)
+            .then(|e| async {
+                let e = e; // Move out this single variable
+                e.to_json_grantee_details(&conn).await
+            })
+            .collect::<Vec<Value>>()
+            .await;
+
+    Ok(Json(json!({
+      "Data": emergency_access_list_json,
+      "Object": "list",
+      "ContinuationToken": null
+    })))
+}
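Because `to_json_grantee_details` is itself async, the list above cannot be built with a plain `iter().map()`; hence the `stream::iter(...).then(...).collect()` chain. A self-contained sketch of that pattern using the `futures` and `tokio` crates (the async `to_json` body is a stand-in):

// Building a Vec from per-item async work, mirroring the pattern above.
use futures::{stream, StreamExt};

async fn to_json(item: u32) -> String {
    format!("{{\"id\":{}}}", item) // stand-in for an async, DB-backed to_json
}

#[tokio::main]
async fn main() {
    let items = vec![1, 2, 3];
    let json: Vec<String> = stream::iter(items)
        .then(|i| async move { to_json(i).await })
        .collect()
        .await;
    println!("{:?}", json);
}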
+
+#[get("/emergency-access/granted")]
+async fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let emergency_access_list_json =
+        stream::iter(EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn).await)
+            .then(|e| async {
+                let e = e; // Move out this single variable
+                e.to_json_grantor_details(&conn).await
+            })
+            .collect::<Vec<Value>>()
+            .await;
+
+    Ok(Json(json!({
+      "Data": emergency_access_list_json,
+      "Object": "list",
+      "ContinuationToken": null
+    })))
+}
+
+#[get("/emergency-access/<emer_id>")]
+async fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn).await)),
+        None => err!("Emergency access not valid."),
+    }
+}
+
+// endregion
+
+// region put/post
+
+#[derive(Deserialize, Debug)]
+#[allow(non_snake_case)]
+struct EmergencyAccessUpdateData {
+    Type: NumberOrString,
+    WaitTimeDays: i32,
+    KeyEncrypted: Option<String>,
+}
+
+#[put("/emergency-access/<emer_id>", data = "<data>")]
+async fn put_emergency_access(
+    emer_id: String,
+    data: JsonUpcase<EmergencyAccessUpdateData>,
+    conn: DbConn,
+) -> JsonResult {
+    post_emergency_access(emer_id, data, conn).await
+}
+
+#[post("/emergency-access/<emer_id>", data = "<data>")]
+async fn post_emergency_access(
+    emer_id: String,
+    data: JsonUpcase<EmergencyAccessUpdateData>,
+    conn: DbConn,
+) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let data: EmergencyAccessUpdateData = data.into_inner().data;
+
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emergency_access) => emergency_access,
+        None => err!("Emergency access not valid."),
+    };
+
+    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+        Some(new_type) => new_type as i32,
+        None => err!("Invalid emergency access type."),
+    };
+
+    emergency_access.atype = new_type;
+    emergency_access.wait_time_days = data.WaitTimeDays;
+    emergency_access.key_encrypted = data.KeyEncrypted;
+
+    emergency_access.save(&conn).await?;
+    Ok(Json(emergency_access.to_json()))
+}
+
+// endregion
+
+// region delete
+
+#[delete("/emergency-access/<emer_id>")]
+async fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
+    check_emergency_access_allowed()?;
+
+    let grantor_user = headers.user;
+
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => {
+            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
+                err!("Emergency access not valid.")
+            }
+            emer
+        }
+        None => err!("Emergency access not valid."),
+    };
+    emergency_access.delete(&conn).await?;
+    Ok(())
+}
+
+#[post("/emergency-access/<emer_id>/delete")]
+async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
+    delete_emergency_access(emer_id, headers, conn).await
+}
+
+// endregion
+
+// region invite
+
+#[derive(Deserialize, Debug)]
+#[allow(non_snake_case)]
+struct EmergencyAccessInviteData {
+    Email: String,
+    Type: NumberOrString,
+    WaitTimeDays: i32,
+}
+
+#[post("/emergency-access/invite", data = "<data>")]
+async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult {
+    check_emergency_access_allowed()?;
+
+    let data: EmergencyAccessInviteData = data.into_inner().data;
+    let email = data.Email.to_lowercase();
+    let wait_time_days = data.WaitTimeDays;
+
+    let emergency_access_status = EmergencyAccessStatus::Invited as i32;
+
+    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+        Some(new_type) => new_type as i32,
+        None => err!("Invalid emergency access type."),
+    };
+
+    let grantor_user = headers.user;
+
+    // avoid setting yourself as emergency contact
+    if email == grantor_user.email {
+        err!("You can not set yourself as an emergency contact.")
+    }
+
+    let grantee_user = match User::find_by_mail(&email, &conn).await {
+        None => {
+            if !CONFIG.invitations_allowed() {
+                err!(format!("Grantee user does not exist: {}", email))
+            }
+
+            if !CONFIG.is_email_domain_allowed(&email) {
+                err!("Email domain not eligible for invitations")
+            }
+
+            if !CONFIG.mail_enabled() {
+                let invitation = Invitation::new(email.clone());
+                invitation.save(&conn).await?;
+            }
+
+            let mut user = User::new(email.clone());
+            user.save(&conn).await?;
+            user
+        }
+        Some(user) => user,
+    };
+
+    if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
+        &grantor_user.uuid,
+        &grantee_user.uuid,
+        &grantee_user.email,
+        &conn,
+    )
+    .await
+    .is_some()
+    {
+        err!(format!("Grantee user already invited: {}", email))
+    }
+
+    let mut new_emergency_access = EmergencyAccess::new(
+        grantor_user.uuid.clone(),
+        Some(grantee_user.email.clone()),
+        emergency_access_status,
+        new_type,
+        wait_time_days,
+    );
+    new_emergency_access.save(&conn).await?;
+
+    if CONFIG.mail_enabled() {
+        mail::send_emergency_access_invite(
+            &grantee_user.email,
+            &grantee_user.uuid,
+            Some(new_emergency_access.uuid),
+            Some(grantor_user.name.clone()),
+            Some(grantor_user.email),
+        )?;
+    } else {
+        // Automatically mark user as accepted if no email invites
+        match User::find_by_mail(&email, &conn).await {
+            Some(user) => {
+                match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()).await {
+                    Ok(v) => (v),
+                    Err(e) => err!(e.to_string()),
+                }
+            }
+            None => err!("Grantee user not found."),
+        }
+    }
+
+    Ok(())
+}
+
+#[post("/emergency-access/<emer_id>/reinvite")]
+async fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
+    check_emergency_access_allowed()?;
+
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if emergency_access.grantor_uuid != headers.user.uuid {
+        err!("Emergency access not valid.");
+    }
+
+    if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
+        err!("The grantee user is already accepted or confirmed to the organization");
+    }
+
+    let email = match emergency_access.email.clone() {
+        Some(email) => email,
+        None => err!("Email not valid."),
+    };
+
+    let grantee_user = match User::find_by_mail(&email, &conn).await {
+        Some(user) => user,
+        None => err!("Grantee user not found."),
+    };
+
+    let grantor_user = headers.user;
+
+    if CONFIG.mail_enabled() {
+        mail::send_emergency_access_invite(
+            &email,
+            &grantor_user.uuid,
+            Some(emergency_access.uuid),
+            Some(grantor_user.name.clone()),
+            Some(grantor_user.email),
+        )?;
+    } else {
+        if Invitation::find_by_mail(&email, &conn).await.is_none() {
+            let invitation = Invitation::new(email);
+            invitation.save(&conn).await?;
+        }
+
+        // Automatically mark user as accepted if no email invites
+        match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow())
+            .await
+        {
+            Ok(v) => (v),
+            Err(e) => err!(e.to_string()),
+        }
+    }
+
+    Ok(())
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct AcceptData {
+    Token: String,
+}
+
+#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
+async fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult {
+    check_emergency_access_allowed()?;
+
+    let data: AcceptData = data.into_inner().data;
+    let token = &data.Token;
+    let claims = decode_emergency_access_invite(token)?;
+
+    let grantee_user = match User::find_by_mail(&claims.email, &conn).await {
+        Some(user) => {
+            Invitation::take(&claims.email, &conn).await;
+            user
+        }
+        None => err!("Invited user not found"),
+    };
+
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    // get grantor user to send Accepted email
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    if (claims.emer_id.is_some() && emer_id == claims.emer_id.unwrap())
+        && (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap())
+        && (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap())
+    {
+        match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn).await {
+            Ok(v) => (v),
+            Err(e) => err!(e.to_string()),
+        }
+
+        if CONFIG.mail_enabled() {
+            mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email)?;
+        }
+
+        Ok(())
+    } else {
+        err!("Emergency access invitation error.")
+    }
+}
+
+async fn accept_invite_process(
+    grantee_uuid: String,
+    emer_id: String,
+    email: Option<String>,
+    conn: &DbConn,
+) -> EmptyResult {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    let emer_email = emergency_access.email;
+    if emer_email.is_none() || emer_email != email {
+        err!("User email does not match invite.");
+    }
+
+    if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
+        err!("Emergency contact already accepted.");
+    }
+
+    emergency_access.status = EmergencyAccessStatus::Accepted as i32;
+    emergency_access.grantee_uuid = Some(grantee_uuid);
+    emergency_access.email = None;
+    emergency_access.save(conn).await
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct ConfirmData {
+    Key: String,
+}
+
+#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
+async fn confirm_emergency_access(
+    emer_id: String,
+    data: JsonUpcase<ConfirmData>,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let confirming_user = headers.user;
+    let data: ConfirmData = data.into_inner().data;
+    let key = data.Key;
+
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if emergency_access.status != EmergencyAccessStatus::Accepted as i32
+        || emergency_access.grantor_uuid != confirming_user.uuid
+    {
+        err!("Emergency access not valid.")
+    }
+
+    let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
+        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
+            Some(user) => user,
+            None => err!("Grantee user not found."),
+        };
+
+        emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
+        emergency_access.key_encrypted = Some(key);
+        emergency_access.email = None;
+
+        emergency_access.save(&conn).await?;
+
+        if CONFIG.mail_enabled() {
+            mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name)?;
+        }
+        Ok(Json(emergency_access.to_json()))
+    } else {
+        err!("Grantee user not found.")
+    }
+}
+
+// endregion
+
+// region access emergency access
+
+#[post("/emergency-access/<emer_id>/initiate")]
+async fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let initiating_user = headers.user;
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
+        || emergency_access.grantee_uuid != Some(initiating_user.uuid.clone())
+    {
+        err!("Emergency access not valid.")
+    }
+
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    let now = Utc::now().naive_utc();
+    emergency_access.status = EmergencyAccessStatus::RecoveryInitiated as i32;
+    emergency_access.updated_at = now;
+    emergency_access.recovery_initiated_at = Some(now);
+    emergency_access.last_notification_at = Some(now);
+    emergency_access.save(&conn).await?;
+
+    if CONFIG.mail_enabled() {
+        mail::send_emergency_access_recovery_initiated(
+            &grantor_user.email,
+            &initiating_user.name,
+            emergency_access.get_type_as_str(),
+            &emergency_access.wait_time_days.clone().to_string(),
+        )?;
+    }
+    Ok(Json(emergency_access.to_json()))
+}
+
+#[post("/emergency-access/<emer_id>/approve")]
+async fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let approving_user = headers.user;
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
+        || emergency_access.grantor_uuid != approving_user.uuid
+    {
+        err!("Emergency access not valid.")
+    }
+
+    let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
+        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
+            Some(user) => user,
+            None => err!("Grantee user not found."),
+        };
+
+        emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
+        emergency_access.save(&conn).await?;
+
+        if CONFIG.mail_enabled() {
+            mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)?;
+        }
+        Ok(Json(emergency_access.to_json()))
+    } else {
+        err!("Grantee user not found.")
+    }
+}
+
+#[post("/emergency-access/<emer_id>/reject")]
+async fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let rejecting_user = headers.user;
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
+        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
+        || emergency_access.grantor_uuid != rejecting_user.uuid
+    {
+        err!("Emergency access not valid.")
+    }
+
+    let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
+        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
+            Some(user) => user,
+            None => err!("Grantee user not found."),
+        };
+
+        emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
+        emergency_access.save(&conn).await?;
+
+        if CONFIG.mail_enabled() {
+            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name)?;
+        }
+        Ok(Json(emergency_access.to_json()))
+    } else {
+        err!("Grantee user not found.")
+    }
+}
+
+// endregion
+
+// region action
+
+#[post("/emergency-access/<emer_id>/view")]
+async fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let requesting_user = headers.user;
+    let host = headers.host;
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::View) {
+        err!("Emergency access not valid.")
+    }
+
+    let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await;
+    let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, &conn).await;
+
+    let ciphers_json = stream::iter(ciphers)
+        .then(|c| async {
+            let c = c; // Move out this single variable
+            c.to_json(&host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &conn).await
+        })
+        .collect::<Vec<Value>>()
+        .await;
+
+    Ok(Json(json!({
+      "Ciphers": ciphers_json,
+      "KeyEncrypted": &emergency_access.key_encrypted,
+      "Object": "emergencyAccessView",
+    })))
+}
+
+#[post("/emergency-access/<emer_id>/takeover")]
+async fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    check_emergency_access_allowed()?;
+
+    let requesting_user = headers.user;
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
+        err!("Emergency access not valid.")
+    }
+
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    Ok(Json(json!({
+      "Kdf": grantor_user.client_kdf_type,
+      "KdfIterations": grantor_user.client_kdf_iter,
+      "KeyEncrypted": &emergency_access.key_encrypted,
+      "Object": "emergencyAccessTakeover",
+    })))
+}
+
+#[derive(Deserialize, Debug)]
+#[allow(non_snake_case)]
+struct EmergencyAccessPasswordData {
+    NewMasterPasswordHash: String,
+    Key: String,
+}
+
+#[post("/emergency-access/<emer_id>/password", data = "<data>")]
+async fn password_emergency_access(
+    emer_id: String,
+    data: JsonUpcase<EmergencyAccessPasswordData>,
+    headers: Headers,
+    conn: DbConn,
+) -> EmptyResult {
+    check_emergency_access_allowed()?;
+
+    let data: EmergencyAccessPasswordData = data.into_inner().data;
+    let new_master_password_hash = &data.NewMasterPasswordHash;
+    let key = data.Key;
+
+    let requesting_user = headers.user;
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
+        err!("Emergency access not valid.")
+    }
+
+    let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    // change grantor_user password
+    grantor_user.set_password(new_master_password_hash, None);
+    grantor_user.akey = key;
+    grantor_user.save(&conn).await?;
+
+    // Disable TwoFactor providers since they will otherwise block logins
+    TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn).await?;
+
+    // Remove grantor from all organisations unless Owner
+    for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn).await {
+        if user_org.atype != UserOrgType::Owner as i32 {
+            user_org.delete(&conn).await?;
+        }
+    }
+    Ok(())
+}
+
+// endregion
+
+#[get("/emergency-access/<emer_id>/policies")]
+async fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    let requesting_user = headers.user;
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
+        Some(emer) => emer,
+        None => err!("Emergency access not valid."),
+    };
+
+    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
+        err!("Emergency access not valid.")
+    }
+
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
+        Some(user) => user,
+        None => err!("Grantor user not found."),
+    };
+
+    let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn);
+    let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
+
+    Ok(Json(json!({
+      "Data": policies_json,
+      "Object": "list",
+      "ContinuationToken": null
+    })))
+}
+
+fn is_valid_request(
+    emergency_access: &EmergencyAccess,
+    requesting_user_uuid: String,
+    requested_access_type: EmergencyAccessType,
+) -> bool {
+    emergency_access.grantee_uuid == Some(requesting_user_uuid)
+        && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
+        && emergency_access.atype == requested_access_type as i32
+}
+
+fn check_emergency_access_allowed() -> EmptyResult {
+    if !CONFIG.emergency_access_allowed() {
+        err!("Emergency access is not allowed.")
+    }
+    Ok(())
+}
+
+pub async fn emergency_request_timeout_job(pool: DbPool) {
+    debug!("Start emergency_request_timeout_job");
+    if !CONFIG.emergency_access_allowed() {
+        return;
+    }
+
+    if let Ok(conn) = pool.get().await {
+        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await;
+
+        if emergency_access_list.is_empty() {
+            debug!("No emergency request timeout to approve");
+        }
+
+        for mut emer in emergency_access_list {
+            if emer.recovery_initiated_at.is_some()
+                && Utc::now().naive_utc()
+                    >= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64)
+            {
+                emer.status = EmergencyAccessStatus::RecoveryApproved as i32;
+                emer.save(&conn).await.expect("Cannot save emergency access on job");
+
+                if CONFIG.mail_enabled() {
+                    // get grantor user to send Accepted email
+                    let grantor_user =
+                        User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found.");
+
+                    // get grantee user to send Accepted email
+                    let grantee_user =
+                        User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
+                            .await
+                            .expect("Grantee user not found.");
+
+                    mail::send_emergency_access_recovery_timed_out(
+                        &grantor_user.email,
+                        &grantee_user.name.clone(),
+                        emer.get_type_as_str(),
+                    )
+                    .expect("Error on sending email");
+
+                    mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name.clone())
+                        .expect("Error on sending email");
+                }
+            }
+        }
+    } else {
+        error!("Failed to get DB connection while searching emergency request timed out")
+    }
+}
+
+pub async fn emergency_notification_reminder_job(pool: DbPool) {
+    debug!("Start emergency_notification_reminder_job");
+    if !CONFIG.emergency_access_allowed() {
+        return;
+    }
+
+    if let Ok(conn) = pool.get().await {
+        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await;
+
+        if emergency_access_list.is_empty() {
+            debug!("No emergency request reminder notification to send");
+        }
+
+        for mut emer in emergency_access_list {
+            if (emer.recovery_initiated_at.is_some()
+                && Utc::now().naive_utc()
+                    >= emer.recovery_initiated_at.unwrap() + Duration::days((emer.wait_time_days as i64) - 1))
+                && (emer.last_notification_at.is_none()
+                    || (emer.last_notification_at.is_some()
+                        && Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1)))
+            {
+                emer.save(&conn).await.expect("Cannot save emergency access on job");
+
+                if CONFIG.mail_enabled() {
+                    // get grantor user to send Accepted email
+                    let grantor_user =
+                        User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found.");
+
+                    // get grantee user to send Accepted email
+                    let grantee_user =
+                        User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
+                            .await
+                            .expect("Grantee user not found.");
+
+                    mail::send_emergency_access_recovery_reminder(
+                        &grantor_user.email,
+                        &grantee_user.name.clone(),
+                        emer.get_type_as_str(),
+                        &emer.wait_time_days.to_string(), // TODO(jjlin): This should be the number of days left.
+                    )
+                    .expect("Error on sending email");
+                }
+            }
+        }
+    } else {
+        error!("Failed to get DB connection while searching emergency notification reminder")
+    }
+}
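Both background jobs turn on the same date arithmetic: a recovery request auto-approves once `recovery_initiated_at + wait_time_days` lies in the past. That check in isolation, using `chrono` as the file does:

// The wait-period test used by emergency_request_timeout_job, extracted.
use chrono::{Duration, NaiveDateTime, Utc};

fn recovery_expired(initiated_at: Option<NaiveDateTime>, wait_time_days: i32) -> bool {
    match initiated_at {
        Some(started) => Utc::now().naive_utc() >= started + Duration::days(wait_time_days as i64),
        None => false, // recovery was never initiated
    }
}

fn main() {
    let eight_days_ago = Utc::now().naive_utc() - Duration::days(8);
    assert!(recovery_expired(Some(eight_days_ago), 7));
    assert!(!recovery_expired(Some(eight_days_ago), 30));
}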
@@ -1,4 +1,4 @@
|
|||||||
use rocket_contrib::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -8,21 +8,12 @@ use crate::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
pub fn routes() -> Vec<rocket::Route> {
|
pub fn routes() -> Vec<rocket::Route> {
-    routes![
-        get_folders,
-        get_folder,
-        post_folders,
-        post_folder,
-        put_folder,
-        delete_folder_post,
-        delete_folder,
-    ]
+    routes![get_folders, get_folder, post_folders, post_folder, put_folder, delete_folder_post, delete_folder,]
 }

 #[get("/folders")]
-fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
-    let folders = Folder::find_by_user(&headers.user.uuid, &conn);
+async fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
+    let folders = Folder::find_by_user(&headers.user.uuid, &conn).await;

     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
@@ -33,8 +24,8 @@ fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
 }

 #[get("/folders/<uuid>")]
-fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn) {
+async fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
+    let folder = match Folder::find_by_uuid(&uuid, &conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -53,27 +44,39 @@ pub struct FolderData {
 }

 #[post("/folders", data = "<data>")]
-fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     let data: FolderData = data.into_inner().data;

     let mut folder = Folder::new(headers.user.uuid, data.Name);

-    folder.save(&conn)?;
+    folder.save(&conn).await?;
     nt.send_folder_update(UpdateType::FolderCreate, &folder);

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>", data = "<data>")]
-fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    put_folder(uuid, data, headers, conn, nt)
+async fn post_folder(
+    uuid: String,
+    data: JsonUpcase<FolderData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    put_folder(uuid, data, headers, conn, nt).await
 }

 #[put("/folders/<uuid>", data = "<data>")]
-fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+async fn put_folder(
+    uuid: String,
+    data: JsonUpcase<FolderData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
     let data: FolderData = data.into_inner().data;

-    let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
+    let mut folder = match Folder::find_by_uuid(&uuid, &conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -84,20 +87,20 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn

     folder.name = data.Name;

-    folder.save(&conn)?;
+    folder.save(&conn).await?;
     nt.send_folder_update(UpdateType::FolderUpdate, &folder);

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>/delete")]
-fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    delete_folder(uuid, headers, conn, nt)
+async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    delete_folder(uuid, headers, conn, nt).await
 }

 #[delete("/folders/<uuid>")]
-fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn) {
+async fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let folder = match Folder::find_by_uuid(&uuid, &conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -107,7 +110,7 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> Em
     }

     // Delete the actual folder entry
-    folder.delete(&conn)?;
+    folder.delete(&conn).await?;

     nt.send_folder_update(UpdateType::FolderDelete, &folder);
     Ok(())
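The pattern running through the diff above, `fn` handlers becoming `async fn`, `Notify` gaining a `'_` lifetime, and every database call growing an `.await`, is the Rocket 0.5 async migration. A minimal sketch of the same handler shape, with a stand-in for the real `Folder::find_by_user` query (the function body and mount point here are illustrative assumptions, not Vaultwarden's actual API):

```rust
// Requires rocket = { version = "0.5", features = ["json"] } and serde_json.
#[macro_use]
extern crate rocket;

use rocket::serde::json::Json;
use serde_json::{json, Value};

// Hypothetical stand-in for the Diesel-backed Folder::find_by_user query.
async fn find_by_user(user_uuid: &str) -> Vec<String> {
    vec![format!("folder-of-{user_uuid}")]
}

#[get("/folders")]
async fn get_folders() -> Json<Value> {
    // In async Rocket the handler awaits the query instead of blocking a worker.
    let folders = find_by_user("example-user").await;
    Json(json!({ "Data": folders, "Object": "list", "ContinuationToken": null }))
}

#[launch]
fn rocket() -> _ {
    rocket::build().mount("/api", routes![get_folders])
}
```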
--- a/src/api/core/mod.rs
+++ b/src/api/core/mod.rs
@@ -1,30 +1,35 @@
-mod accounts;
+pub mod accounts;
 mod ciphers;
+mod emergency_access;
 mod folders;
 mod organizations;
-pub mod two_factor;
 mod sends;
+pub mod two_factor;

-pub use sends::start_send_deletion_scheduler;
+pub use ciphers::purge_trashed_ciphers;
+pub use ciphers::CipherSyncData;
+pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
+pub use sends::purge_sends;
+pub use two_factor::send_incomplete_2fa_notifications;

 pub fn routes() -> Vec<Route> {
-    let mut mod_routes = routes![
-        clear_device_token,
-        put_device_token,
-        get_eq_domains,
-        post_eq_domains,
-        put_eq_domains,
-        hibp_breach,
-    ];
+    let mut device_token_routes = routes![clear_device_token, put_device_token];
+    let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
+    let mut hibp_routes = routes![hibp_breach];
+    let mut meta_routes = routes![alive, now, version];

     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
     routes.append(&mut ciphers::routes());
+    routes.append(&mut emergency_access::routes());
     routes.append(&mut folders::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
     routes.append(&mut sends::routes());
-    routes.append(&mut mod_routes);
+    routes.append(&mut device_token_routes);
+    routes.append(&mut eq_domains_routes);
+    routes.append(&mut hibp_routes);
+    routes.append(&mut meta_routes);

     routes
 }
@@ -32,9 +37,8 @@ pub fn routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
+use rocket::serde::json::Json;
 use rocket::Route;
-use rocket_contrib::json::Json;
-use rocket::response::Response;
 use serde_json::Value;

 use crate::{
@@ -42,10 +46,11 @@ use crate::{
     auth::Headers,
     db::DbConn,
     error::Error,
+    util::get_reqwest_client,
 };

 #[put("/devices/identifier/<uuid>/clear-token")]
-fn clear_device_token<'a>(uuid: String) -> Response<'a> {
+fn clear_device_token(uuid: String) -> &'static str {
     // This endpoint doesn't have auth header

     let _ = uuid;
@@ -54,7 +59,7 @@ fn clear_device_token<'a>(uuid: String) -> Response<'a> {
     // This only clears push token
     // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
     // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
-    Response::new()
+    ""
 }

 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
@@ -122,7 +127,7 @@ struct EquivDomainData {
 }

 #[post("/settings/domains", data = "<data>")]
-fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;

     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -134,41 +139,34 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
     user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());

-    user.save(&conn)?;
+    user.save(&conn).await?;

     Ok(Json(json!({})))
 }

 #[put("/settings/domains", data = "<data>")]
-fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_eq_domains(data, headers, conn)
+async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_eq_domains(data, headers, conn).await
 }

 #[get("/hibp/breach?<username>")]
-fn hibp_breach(username: String) -> JsonResult {
-    let user_agent = "Bitwarden_RS";
+async fn hibp_breach(username: String) -> JsonResult {
     let url = format!(
         "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
         username
     );

-    use reqwest::{blocking::Client, header::USER_AGENT};
-
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = Client::builder().build()?;
-
-        let res = hibp_client
-            .get(&url)
-            .header(USER_AGENT, user_agent)
-            .header("hibp-api-key", api_key)
-            .send()?;
+        let hibp_client = get_reqwest_client();
+
+        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;

         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {
             return Err(Error::empty().with_code(404));
         }

-        let value: Value = res.error_for_status()?.json()?;
+        let value: Value = res.error_for_status()?.json().await?;
         Ok(Json(value))
     } else {
         Ok(Json(json!([{
@@ -178,7 +176,7 @@ fn hibp_breach(username: String) -> JsonResult {
             "BreachDate": "2019-08-18T00:00:00Z",
             "AddedDate": "2019-08-18T00:00:00Z",
             "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
-            "LogoPath": "bwrs_static/hibp.png",
+            "LogoPath": "vw_static/hibp.png",
             "PwnCount": 0,
             "DataClasses": [
                 "Error - No API key set!"
@@ -186,3 +184,19 @@ fn hibp_breach(username: String) -> JsonResult {
         }])))
     }
 }
+
+// We use DbConn here to let the alive healthcheck also verify the database connection.
+#[get("/alive")]
+fn alive(_conn: DbConn) -> Json<String> {
+    now()
+}
+
+#[get("/now")]
+pub fn now() -> Json<String> {
+    Json(crate::util::format_date(&chrono::Utc::now().naive_utc()))
+}
+
+#[get("/version")]
+fn version() -> Json<&'static str> {
+    Json(crate::VERSION.unwrap_or_default())
+}
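A note on the new meta routes: `alive` deliberately takes a `DbConn` request guard, so a 200 response proves the database pool can still hand out connections, not just that the web server is up. A quick external probe might look like this (the base URL is an assumption for a local instance):

```rust
// Requires reqwest and tokio (with the "macros" and "rt" features).
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // /api/alive returns the current server time (same body as /api/now)
    // and only answers 200 when a DB connection could be acquired.
    let body = reqwest::get("http://localhost/api/alive").await?.text().await?;
    println!("vaultwarden alive, server time: {}", body);
    Ok(())
}
```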
File diff suppressed because it is too large
--- a/src/api/core/sends.rs
+++ b/src/api/core/sends.rs
@@ -1,15 +1,17 @@
-use std::{io::Read, path::Path};
+use std::path::Path;

 use chrono::{DateTime, Duration, Utc};
-use multipart::server::{save::SavedData, Multipart, SaveResult};
-use rocket::{http::ContentType, Data};
-use rocket_contrib::json::Json;
+use rocket::form::Form;
+use rocket::fs::NamedFile;
+use rocket::fs::TempFile;
+use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
-    auth::{Headers, Host},
-    db::{models::*, DbConn},
+    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
+    auth::{ClientIp, Headers, Host},
+    db::{models::*, DbConn, DbPool},
+    util::SafeString,
     CONFIG,
 };

@@ -17,49 +19,45 @@ const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer availab

 pub fn routes() -> Vec<rocket::Route> {
     routes![
+        get_sends,
+        get_send,
         post_send,
         post_send_file,
         post_access,
         post_access_file,
         put_send,
         delete_send,
-        put_remove_password
+        put_remove_password,
+        download_send
     ]
 }

-pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
-    std::thread::spawn(move || {
-        loop {
-            if let Ok(conn) = pool.get() {
-                info!("Initiating send deletion");
-                for send in Send::find_all(&conn) {
-                    if chrono::Utc::now().naive_utc() >= send.deletion_date {
-                        send.delete(&conn).ok();
-                    }
-                }
-            }
-
-            std::thread::sleep(std::time::Duration::from_secs(3600));
-        }
-    });
-}
+pub async fn purge_sends(pool: DbPool) {
+    debug!("Purging sends");
+    if let Ok(conn) = pool.get().await {
+        Send::purge(&conn).await;
+    } else {
+        error!("Failed to get DB connection while purging sends")
+    }
+}

 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-pub struct SendData {
-    pub Type: i32,
-    pub Key: String,
-    pub Password: Option<String>,
-    pub MaxAccessCount: Option<i32>,
-    pub ExpirationDate: Option<DateTime<Utc>>,
-    pub DeletionDate: DateTime<Utc>,
-    pub Disabled: bool,
+struct SendData {
+    Type: i32,
+    Key: String,
+    Password: Option<String>,
+    MaxAccessCount: Option<NumberOrString>,
+    ExpirationDate: Option<DateTime<Utc>>,
+    DeletionDate: DateTime<Utc>,
+    Disabled: bool,
+    HideEmail: Option<bool>,

     // Data field
-    pub Name: String,
-    pub Notes: Option<String>,
-    pub Text: Option<Value>,
-    pub File: Option<Value>,
+    Name: String,
+    Notes: Option<String>,
+    Text: Option<Value>,
+    File: Option<Value>,
 }

 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -67,16 +65,37 @@ pub struct SendData {
 /// modify existing ones, but is allowed to delete them.
 ///
 /// Ref: https://bitwarden.com/help/article/policies/#disable-send
-fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
+///
+/// There is also a Vaultwarden-specific `sends_allowed` config setting that
+/// controls this policy globally.
+async fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
     let user_uuid = &headers.user.uuid;
     let policy_type = OrgPolicyType::DisableSend;
-    if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
+    if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn).await {
         err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
     }
     Ok(())
 }

-fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
+/// Enforces the `DisableHideEmail` option of the `Send Options` policy.
+/// A non-owner/admin user belonging to an org with this option enabled isn't
+/// allowed to hide their email address from the recipient of a Bitwarden Send,
+/// but is allowed to remove this option from an existing Send.
+///
+/// Ref: https://bitwarden.com/help/article/policies/#send-options
+async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
+    let user_uuid = &headers.user.uuid;
+    let hide_email = data.HideEmail.unwrap_or(false);
+    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
+        err!(
+            "Due to an Enterprise Policy, you are not allowed to hide your email address \
+            from recipients when creating or editing a Send."
+        )
+    }
+    Ok(())
+}
+
+async fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
     let data_val = if data.Type == SendType::Text as i32 {
         data.Text
     } else if data.Type == SendType::File as i32 {
@@ -98,12 +117,16 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
         );
     }

-    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
+    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc()).await;
     send.user_uuid = Some(user_uuid);
     send.notes = data.Notes;
-    send.max_access_count = data.MaxAccessCount;
+    send.max_access_count = match data.MaxAccessCount {
+        Some(m) => Some(m.into_i32()?),
+        _ => None,
+    };
     send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
     send.disabled = data.Disabled;
+    send.hide_email = data.HideEmail;
     send.atype = data.Type;

     send.set_password(data.Password.as_deref());
@@ -111,111 +134,110 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
     Ok(send)
 }

+#[get("/sends")]
+async fn get_sends(headers: Headers, conn: DbConn) -> Json<Value> {
+    let sends = Send::find_by_user(&headers.user.uuid, &conn);
+    let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();
+
+    Json(json!({
+      "Data": sends_json,
+      "Object": "list",
+      "ContinuationToken": null
+    }))
+}
+
+#[get("/sends/<uuid>")]
+async fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
+    let send = match Send::find_by_uuid(&uuid, &conn).await {
+        Some(send) => send,
+        None => err!("Send not found"),
+    };
+
+    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
+        err!("Send is not owned by user")
+    }
+
+    Ok(Json(send.to_json()))
+}
+
 #[post("/sends", data = "<data>")]
-fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    enforce_disable_send_policy(&headers, &conn)?;
+async fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;

     let data: SendData = data.into_inner().data;
+    enforce_disable_hide_email_policy(&data, &headers, &conn).await?;

     if data.Type == SendType::File as i32 {
         err!("File sends should use /api/sends/file")
     }

-    let mut send = create_send(data, headers.user.uuid.clone())?;
-    send.save(&conn)?;
-    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);
+    let mut send = create_send(data, headers.user.uuid).await?;
+    send.save(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn).await);

     Ok(Json(send.to_json()))
 }

+#[derive(FromForm)]
+struct UploadData<'f> {
+    model: Json<crate::util::UpCase<SendData>>,
+    data: TempFile<'f>,
+}
+
 #[post("/sends/file", format = "multipart/form-data", data = "<data>")]
-fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    enforce_disable_send_policy(&headers, &conn)?;
+async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;

-    let boundary = content_type.params().next().expect("No boundary provided").1;
-
-    let mut mpart = Multipart::with_body(data.open(), boundary);
-
-    // First entry is the SendData JSON
-    let mut model_entry = match mpart.read_entry()? {
-        Some(e) if &*e.headers.name == "model" => e,
-        Some(_) => err!("Invalid entry name"),
-        None => err!("No model entry present"),
-    };
-
-    let mut buf = String::new();
-    model_entry.data.read_to_string(&mut buf)?;
-    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
-
-    // Get the file length and add an extra 10% to avoid issues
-    const SIZE_110_MB: u64 = 115_343_360;
+    let UploadData {
+        model,
+        mut data,
+    } = data.into_inner();
+    let model = model.into_inner().data;
+
+    enforce_disable_hide_email_policy(&model, &headers, &conn).await?;
+
+    // Get the file length and add an extra 5% to avoid issues
+    const SIZE_525_MB: u64 = 550_502_400;

     let size_limit = match CONFIG.user_attachment_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
+            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn).await;
             if left <= 0 {
-                err!("Attachment size limit reached! Delete some files to open space")
+                err!("Attachment storage limit reached! Delete some attachments to free up space")
             }
-            std::cmp::Ord::max(left as u64, SIZE_110_MB)
+            std::cmp::Ord::max(left as u64, SIZE_525_MB)
         }
-        None => SIZE_110_MB,
+        None => SIZE_525_MB,
     };

-    // Create the Send
-    let mut send = create_send(data.data, headers.user.uuid.clone())?;
-    let file_id: String = data_encoding::HEXLOWER.encode(&crate::crypto::get_random(vec![0; 32]));
+    let mut send = create_send(model, headers.user.uuid).await?;

     if send.atype != SendType::File as i32 {
         err!("Send content is not a file");
     }

-    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);
-
-    // Read the data entry and save the file
-    let mut data_entry = match mpart.read_entry()? {
-        Some(e) if &*e.headers.name == "data" => e,
-        Some(_) => err!("Invalid entry name"),
-        None => err!("No model entry present"),
-    };
-
-    let size = match data_entry
-        .data
-        .save()
-        .memory_threshold(0)
-        .size_limit(size_limit)
-        .with_path(&file_path)
-    {
-        SaveResult::Full(SavedData::File(_, size)) => size as i32,
-        SaveResult::Full(other) => {
-            std::fs::remove_file(&file_path).ok();
-            err!(format!("Attachment is not a file: {:?}", other));
-        }
-        SaveResult::Partial(_, reason) => {
-            std::fs::remove_file(&file_path).ok();
-            err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
-        }
-        SaveResult::Error(e) => {
-            std::fs::remove_file(&file_path).ok();
-            err!(format!("Error: {:?}", e));
-        }
-    };
-
-    // Set ID and sizes
+    let size = data.len();
+    if size > size_limit {
+        err!("Attachment storage limit exceeded with this file");
+    }
+
+    let file_id = crate::crypto::generate_send_id();
+    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
+    let file_path = folder_path.join(&file_id);
+    tokio::fs::create_dir_all(&folder_path).await?;
+    data.persist_to(&file_path).await?;
+
     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
         o.insert(String::from("Id"), Value::String(file_id));
         o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(
-            String::from("SizeName"),
-            Value::String(crate::util::get_display_size(size)),
-        );
+        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
     }
     send.data = serde_json::to_string(&data_value)?;

     // Save the changes in the database
-    send.save(&conn)?;
-    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);
+    send.save(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await);

     Ok(Json(send.to_json()))
 }
@@ -227,8 +249,8 @@ pub struct SendAccessData {
 }

 #[post("/sends/access/<access_id>", data = "<data>")]
-fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
-    let mut send = match Send::find_by_access_id(&access_id, &conn) {
+async fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn, ip: ClientIp) -> JsonResult {
+    let mut send = match Send::find_by_access_id(&access_id, &conn).await {
         Some(s) => s,
         None => err_code!(SEND_INACCESSIBLE_MSG, 404),
     };
@@ -256,8 +278,8 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
     if send.password_hash.is_some() {
         match data.into_inner().data.Password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
-            Some(_) => err!("Invalid password."),
-            None => err_code!("Password not provided", 401),
+            Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
+            None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
         }
     }

@@ -266,20 +288,20 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
         send.access_count += 1;
     }

-    send.save(&conn)?;
+    send.save(&conn).await?;

-    Ok(Json(send.to_json_access()))
+    Ok(Json(send.to_json_access(&conn).await))
 }

 #[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
-fn post_access_file(
+async fn post_access_file(
     send_id: String,
     file_id: String,
     data: JsonUpcase<SendAccessData>,
     host: Host,
     conn: DbConn,
 ) -> JsonResult {
-    let mut send = match Send::find_by_uuid(&send_id, &conn) {
+    let mut send = match Send::find_by_uuid(&send_id, &conn).await {
         Some(s) => s,
         None => err_code!(SEND_INACCESSIBLE_MSG, 404),
     };
@@ -314,22 +336,41 @@ fn post_access_file(

     send.access_count += 1;

-    send.save(&conn)?;
+    send.save(&conn).await?;

+    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
+    let token = crate::auth::encode_jwt(&token_claims);
     Ok(Json(json!({
         "Object": "send-fileDownload",
         "Id": file_id,
-        "Url": format!("{}/sends/{}/{}", &host.host, send_id, file_id)
+        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
     })))
 }

+#[get("/sends/<send_id>/<file_id>?<t>")]
+async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
+    if let Ok(claims) = crate::auth::decode_send(&t) {
+        if claims.sub == format!("{}/{}", send_id, file_id) {
+            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
+        }
+    }
+    None
+}
+
 #[put("/sends/<id>", data = "<data>")]
-fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    enforce_disable_send_policy(&headers, &conn)?;
+async fn put_send(
+    id: String,
+    data: JsonUpcase<SendData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;

     let data: SendData = data.into_inner().data;
+    enforce_disable_hide_email_policy(&data, &headers, &conn).await?;

-    let mut send = match Send::find_by_uuid(&id, &conn) {
+    let mut send = match Send::find_by_uuid(&id, &conn).await {
         Some(s) => s,
         None => err!("Send not found"),
     };
@@ -363,8 +404,12 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
     send.akey = data.Key;
     send.deletion_date = data.DeletionDate.naive_utc();
     send.notes = data.Notes;
-    send.max_access_count = data.MaxAccessCount;
+    send.max_access_count = match data.MaxAccessCount {
+        Some(m) => Some(m.into_i32()?),
+        _ => None,
+    };
     send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
+    send.hide_email = data.HideEmail;
     send.disabled = data.Disabled;

     // Only change the value if it's present
@@ -372,15 +417,15 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
         send.set_password(Some(&password));
     }

-    send.save(&conn)?;
-    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);
+    send.save(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await);

     Ok(Json(send.to_json()))
 }

 #[delete("/sends/<id>")]
-fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    let send = match Send::find_by_uuid(&id, &conn) {
+async fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let send = match Send::find_by_uuid(&id, &conn).await {
         Some(s) => s,
         None => err!("Send not found"),
     };
@@ -389,17 +434,17 @@ fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyR
         err!("Send is not owned by user")
     }

-    send.delete(&conn)?;
-    nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);
+    send.delete(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&conn).await);

     Ok(())
 }

 #[put("/sends/<id>/remove-password")]
-fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    enforce_disable_send_policy(&headers, &conn)?;
+async fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;

-    let mut send = match Send::find_by_uuid(&id, &conn) {
+    let mut send = match Send::find_by_uuid(&id, &conn).await {
         Some(s) => s,
         None => err!("Send not found"),
     };
@@ -409,8 +454,8 @@ fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -
     }

     send.set_password(None);
-    send.save(&conn)?;
-    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);
+    send.save(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await);

     Ok(Json(send.to_json()))
 }
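Two things change shape in this file besides the async migration: `MaxAccessCount` becomes a `NumberOrString` so clients may post it as either a JSON number or a string, and file downloads move behind the new `download_send` route, where `post_access_file` mints a short-lived JWT whose `sub` claim binds the send id and file id together. A rough sketch of that token flow, using the `jsonwebtoken` crate and an HS256 secret as assumed stand-ins for `crate::auth::{generate_send_claims, encode_jwt, decode_send}`:

```rust
// Requires jsonwebtoken, serde, and chrono; a sketch, not the real auth module.
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct SendFileClaims {
    sub: String, // "<send_id>/<file_id>", checked by the download route
    exp: i64,    // expiry; jsonwebtoken validates this by default on decode
}

fn issue_download_token(secret: &[u8], send_id: &str, file_id: &str) -> String {
    let claims = SendFileClaims {
        sub: format!("{}/{}", send_id, file_id),
        exp: chrono::Utc::now().timestamp() + 5 * 60, // assumed 5-minute window
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).expect("signing failed")
}

fn token_grants_file(secret: &[u8], token: &str, send_id: &str, file_id: &str) -> bool {
    decode::<SendFileClaims>(token, &DecodingKey::from_secret(secret), &Validation::default())
        .map(|data| data.claims.sub == format!("{}/{}", send_id, file_id))
        .unwrap_or(false)
}
```

With a check like `token_grants_file` in front of the filesystem read, the file is only served when the signed claim matches, which is exactly the comparison `download_send` performs against `claims.sub`.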
@@ -1,6 +1,6 @@
|
|||||||
use data_encoding::BASE32;
|
use data_encoding::BASE32;
|
||||||
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
use rocket_contrib::json::Json;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
@@ -17,15 +17,11 @@ use crate::{
|
|||||||
pub use crate::config::CONFIG;
|
pub use crate::config::CONFIG;
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
routes![
|
routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
|
||||||
generate_authenticator,
|
|
||||||
activate_authenticator,
|
|
||||||
activate_authenticator_put,
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/get-authenticator", data = "<data>")]
|
#[post("/two-factor/get-authenticator", data = "<data>")]
|
||||||
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
|
async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
@@ -34,7 +30,7 @@ fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn
|
|||||||
}
|
}
|
||||||
|
|
||||||
let type_ = TwoFactorType::Authenticator as i32;
|
let type_ = TwoFactorType::Authenticator as i32;
|
||||||
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
|
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await;
|
||||||
|
|
||||||
let (enabled, key) = match twofactor {
|
let (enabled, key) = match twofactor {
|
||||||
Some(tf) => (true, tf.data),
|
Some(tf) => (true, tf.data),
|
||||||
@@ -57,7 +53,7 @@ struct EnableAuthenticatorData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/authenticator", data = "<data>")]
|
#[post("/two-factor/authenticator", data = "<data>")]
|
||||||
fn activate_authenticator(
|
async fn activate_authenticator(
|
||||||
data: JsonUpcase<EnableAuthenticatorData>,
|
data: JsonUpcase<EnableAuthenticatorData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
ip: ClientIp,
|
ip: ClientIp,
|
||||||
@@ -66,7 +62,7 @@ fn activate_authenticator(
|
|||||||
let data: EnableAuthenticatorData = data.into_inner().data;
|
let data: EnableAuthenticatorData = data.into_inner().data;
|
||||||
let password_hash = data.MasterPasswordHash;
|
let password_hash = data.MasterPasswordHash;
|
||||||
let key = data.Key;
|
let key = data.Key;
|
||||||
let token = data.Token.into_i32()? as u64;
|
let token = data.Token.into_string();
|
||||||
|
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
@@ -85,9 +81,9 @@ fn activate_authenticator(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Validate the token provided with the key, and save new twofactor
|
// Validate the token provided with the key, and save new twofactor
|
||||||
validate_totp_code(&user.uuid, token, &key.to_uppercase(), &ip, &conn)?;
|
validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &conn).await?;
|
||||||
|
|
||||||
_generate_recover_code(&mut user, &conn);
|
_generate_recover_code(&mut user, &conn).await;
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
"Enabled": true,
|
"Enabled": true,
|
||||||
@@ -97,88 +93,84 @@ fn activate_authenticator(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[put("/two-factor/authenticator", data = "<data>")]
|
#[put("/two-factor/authenticator", data = "<data>")]
|
||||||
fn activate_authenticator_put(
|
async fn activate_authenticator_put(
|
||||||
data: JsonUpcase<EnableAuthenticatorData>,
|
data: JsonUpcase<EnableAuthenticatorData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
ip: ClientIp,
|
ip: ClientIp,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
activate_authenticator(data, headers, ip, conn)
|
activate_authenticator(data, headers, ip, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn validate_totp_code_str(
|
pub async fn validate_totp_code_str(
|
||||||
user_uuid: &str,
|
user_uuid: &str,
|
||||||
totp_code: &str,
|
totp_code: &str,
|
||||||
secret: &str,
|
secret: &str,
|
||||||
ip: &ClientIp,
|
ip: &ClientIp,
|
||||||
conn: &DbConn,
|
conn: &DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let totp_code: u64 = match totp_code.parse() {
|
if !totp_code.chars().all(char::is_numeric) {
|
||||||
Ok(code) => code,
|
err!("TOTP code is not a number");
|
||||||
_ => err!("TOTP code is not a number"),
|
|
||||||
};
|
|
||||||
|
|
||||||
validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
|
validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
|
||||||
use oath::{totp_raw_custom_time, HashType};
|
}
|
||||||
|
|
||||||
|
pub async fn validate_totp_code(
|
||||||
|
user_uuid: &str,
|
||||||
|
totp_code: &str,
|
||||||
|
secret: &str,
|
||||||
|
ip: &ClientIp,
|
||||||
|
conn: &DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
|
use totp_lite::{totp_custom, Sha1};
|
||||||
|
|
||||||
let decoded_secret = match BASE32.decode(secret.as_bytes()) {
|
let decoded_secret = match BASE32.decode(secret.as_bytes()) {
|
||||||
Ok(s) => s,
|
Ok(s) => s,
|
||||||
Err(_) => err!("Invalid TOTP secret"),
|
Err(_) => err!("Invalid TOTP secret"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut twofactor = match TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Authenticator as i32, &conn) {
|
let mut twofactor =
|
||||||
|
match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
|
||||||
Some(tf) => tf,
|
Some(tf) => tf,
|
||||||
_ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
|
_ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// The amount of steps back and forward in time
|
||||||
|
// Also check if we need to disable time drifted TOTP codes.
|
||||||
|
// If that is the case, we set the steps to 0 so only the current TOTP is valid.
|
||||||
|
let steps = !CONFIG.authenticator_disable_time_drift() as i64;
|
||||||
|
|
||||||
// Get the current system time in UNIX Epoch (UTC)
|
// Get the current system time in UNIX Epoch (UTC)
|
||||||
let current_time = chrono::Utc::now();
|
let current_time = chrono::Utc::now();
|
||||||
let current_timestamp = current_time.timestamp();
|
let current_timestamp = current_time.timestamp();
|
||||||
|
|
||||||
// The amount of steps back and forward in time
|
|
||||||
// Also check if we need to disable time drifted TOTP codes.
|
|
||||||
// If that is the case, we set the steps to 0 so only the current TOTP is valid.
|
|
||||||
let steps: i64 = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };
|
|
||||||
|
|
||||||
for step in -steps..=steps {
|
for step in -steps..=steps {
|
||||||
let time_step = current_timestamp / 30i64 + step;
|
let time_step = current_timestamp / 30i64 + step;
|
||||||
// We need to calculate the time offsite and cast it as an i128.
|
|
||||||
// Else we can't do math with it on a default u64 variable.
|
// We need to calculate the time offsite and cast it as an u64.
|
||||||
|
// Since we only have times into the future and the totp generator needs an u64 instead of the default i64.
|
||||||
let time = (current_timestamp + step * 30i64) as u64;
|
let time = (current_timestamp + step * 30i64) as u64;
|
||||||
let generated = totp_raw_custom_time(&decoded_secret, 6, 0, 30, time, &HashType::SHA1);
|
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
|
||||||
|
|
||||||
// Check the the given code equals the generated and if the time_step is larger then the one last used.
|
// Check the the given code equals the generated and if the time_step is larger then the one last used.
|
||||||
if generated == totp_code && time_step > twofactor.last_used as i64 {
|
if generated == totp_code && time_step > twofactor.last_used as i64 {
|
||||||
// If the step does not equals 0 the time is drifted either server or client side.
|
// If the step does not equals 0 the time is drifted either server or client side.
|
||||||
if step != 0 {
|
if step != 0 {
|
||||||
info!("TOTP Time drift detected. The step offset is {}", step);
|
warn!("TOTP Time drift detected. The step offset is {}", step);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the last used time step so only totp time steps higher then this one are allowed.
|
// Save the last used time step so only totp time steps higher then this one are allowed.
|
||||||
// This will also save a newly created twofactor if the code is correct.
|
// This will also save a newly created twofactor if the code is correct.
|
||||||
twofactor.last_used = time_step as i32;
|
twofactor.last_used = time_step as i32;
|
||||||
twofactor.save(&conn)?;
|
twofactor.save(conn).await?;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
} else if generated == totp_code && time_step <= twofactor.last_used as i64 {
|
} else if generated == totp_code && time_step <= twofactor.last_used as i64 {
|
||||||
warn!(
|
warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
|
||||||
"This or a TOTP code within {} steps back and forward has already been used!",
|
err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
|
||||||
steps
|
|
||||||
);
|
|
||||||
err!(format!(
|
|
||||||
"Invalid TOTP code! Server time: {} IP: {}",
|
|
||||||
current_time.format("%F %T UTC"),
|
|
||||||
ip.ip
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Else no valide code received, deny access
|
// Else no valide code received, deny access
|
||||||
err!(format!(
|
err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
|
||||||
"Invalid TOTP code! Server time: {} IP: {}",
|
|
||||||
current_time.format("%F %T UTC"),
|
|
||||||
ip.ip
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
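The validation above also swaps the unmaintained `oath` crate for `totp_lite` and compares codes as strings rather than integers, which keeps leading zeros intact. A standalone sketch of the same plus-or-minus-one-step drift window (the secret decoding and clock handling are simplified assumptions, not the exact Vaultwarden function):

```rust
// Requires totp_lite; mirrors the drift-window loop in validate_totp_code.
use totp_lite::{totp_custom, Sha1};

fn code_matches(decoded_secret: &[u8], submitted: &str, now_unix: i64, allow_drift: bool) -> bool {
    // steps = 0 accepts only the current 30-second window; steps = 1 also
    // accepts one window back and one forward, as the handler above does.
    let steps: i64 = if allow_drift { 1 } else { 0 };
    for step in -steps..=steps {
        let time = (now_unix + step * 30) as u64;
        // 30-second period, 6 digits, SHA-1: the classic authenticator defaults.
        if totp_custom::<Sha1>(30, 6, decoded_secret, time) == submitted {
            return true;
        }
    }
    false
}
```

The real function additionally persists the matched time step (`twofactor.last_used`) so a code can never be replayed inside its validity window.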
--- a/src/api/core/two_factor/duo.rs
+++ b/src/api/core/two_factor/duo.rs
@@ -1,7 +1,7 @@
 use chrono::Utc;
 use data_encoding::BASE64;
+use rocket::serde::json::Json;
 use rocket::Route;
-use rocket_contrib::json::Json;

 use crate::{
     api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
@@ -12,6 +12,7 @@ use crate::{
         DbConn,
     },
     error::MapResult,
+    util::get_reqwest_client,
     CONFIG,
 };

@@ -59,7 +60,11 @@ impl DuoData {
         ik.replace_range(digits.., replaced);
         sk.replace_range(digits.., replaced);

-        Self { host, ik, sk }
+        Self {
+            host,
+            ik,
+            sk,
+        }
     }
 }

@@ -84,14 +89,14 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

 #[post("/two-factor/get-duo", data = "<data>")]
-fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: PasswordData = data.into_inner().data;

     if !headers.user.check_valid_password(&data.MasterPasswordHash) {
         err!("Invalid password");
     }

-    let data = get_user_duo_data(&headers.user.uuid, &conn);
+    let data = get_user_duo_data(&headers.user.uuid, &conn).await;

     let (enabled, data) = match data {
         DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -147,7 +152,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
 }

 #[post("/two-factor/duo", data = "<data>")]
-fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: EnableDuoData = data.into_inner().data;
     let mut user = headers.user;

@@ -158,7 +163,7 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
     let (data, data_str) = if check_duo_fields_custom(&data) {
         let data_req: DuoData = data.into();
         let data_str = serde_json::to_string(&data_req)?;
-        duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?;
+        duo_api_request("GET", "/auth/v2/check", "", &data_req).await.map_res("Failed to validate Duo credentials")?;
         (data_req.obscure(), data_str)
     } else {
         (DuoData::secret(), String::new())
@@ -166,9 +171,9 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)

     let type_ = TwoFactorType::Duo;
     let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str);
-    twofactor.save(&conn)?;
+    twofactor.save(&conn).await?;

-    _generate_recover_code(&mut user, &conn);
+    _generate_recover_code(&mut user, &conn).await;

     Ok(Json(json!({
         "Enabled": true,
@@ -180,14 +185,12 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
 }

 #[put("/two-factor/duo", data = "<data>")]
-fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_duo(data, headers, conn)
+async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+    activate_duo(data, headers, conn).await
 }

-fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
-    const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";
-
-    use reqwest::{blocking::Client, header::*, Method};
+async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
+    use reqwest::{header, Method};
     use std::str::FromStr;

     // https://duo.com/docs/authapi#api-details
@@ -199,12 +202,15 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em

     let m = Method::from_str(method).unwrap_or_default();

-    Client::new()
+    let client = get_reqwest_client();
+
+    client
         .request(m, &url)
         .basic_auth(username, Some(password))
-        .header(USER_AGENT, AGENT)
-        .header(DATE, date)
-        .send()?
+        .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
+        .header(header::DATE, date)
+        .send()
+        .await?
         .error_for_status()?;

     Ok(())
@@ -217,11 +223,11 @@ const AUTH_PREFIX: &str = "AUTH";
 const DUO_PREFIX: &str = "TX";
 const APP_PREFIX: &str = "APP";

-fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
+async fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
     let type_ = TwoFactorType::Duo as i32;

     // If the user doesn't have an entry, disabled
-    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, &conn) {
+    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
         Some(t) => t,
         None => return DuoStatus::Disabled(DuoData::global().is_some()),
     };
@@ -241,19 +247,20 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
 }

 // let (ik, sk, ak, host) = get_duo_keys();
-fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
-    let data = User::find_by_mail(email, &conn)
-        .and_then(|u| get_user_duo_data(&u.uuid, &conn).data())
-        .or_else(DuoData::global)
-        .map_res("Can't fetch Duo keys")?;
+async fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
+    let data = match User::find_by_mail(email, conn).await {
+        Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
+        _ => DuoData::global(),
+    }
+    .map_res("Can't fetch Duo Keys")?;

     Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
 }

-pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
+pub async fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
     let now = Utc::now().timestamp();

-    let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?;
+    let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?;

     let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
     let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
@@ -268,7 +275,7 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
     format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
 }

-pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
+pub async fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
     // email is as entered by the user, so it needs to be normalized before
     // comparison with auth_user below.
     let email = &email.to_lowercase();
@@ -283,7 +290,7 @@ pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyRe

     let now = Utc::now().timestamp();

-    let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?;
+    let (ik, sk, ak, _host) = get_duo_keys_email(email, conn).await?;

     let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?;
     let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
@@ -338,7 +345,7 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
         err!("Invalid ikey")
     }

-    let expire = match expire.parse() {
+    let expire: i64 = match expire.parse() {
         Ok(e) => e,
         Err(_) => err!("Invalid expire time"),
     };
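Both this file and mod.rs now send outbound HTTP through a shared `get_reqwest_client()` helper instead of constructing a fresh `reqwest::Client` per request, so connection reuse and a common user agent come for free. The helper itself lives elsewhere in the tree; one plausible shape for it (the `once_cell` approach and the user-agent string are assumptions, not the actual src/util.rs code):

```rust
// Requires reqwest and once_cell; a sketch of a process-wide shared client.
use once_cell::sync::Lazy;
use reqwest::{header, Client};

static SHARED_CLIENT: Lazy<Client> = Lazy::new(|| {
    let mut headers = header::HeaderMap::new();
    headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden"));
    Client::builder()
        .default_headers(headers)
        .build()
        .expect("Failed to build shared reqwest client")
});

pub fn get_reqwest_client() -> &'static Client {
    &SHARED_CLIENT
}
```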
Some files were not shown because too many files have changed in this diff