Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-11 03:05:58 +03:00)

Compare commits: 304 commits
The commit listing in this view contained only abbreviated SHA-1 hashes (c666497130 through 5bdcfe128d); the author, date, and message columns were empty in this export.
.gitignore
@@ -4,6 +4,8 @@ target
 # Data folder
 data
 .env
+.env.template
+.gitattributes
 
 # IDE files
 .vscode
.editorconfig (new file, 23 lines)
@@ -0,0 +1,23 @@
+# EditorConfig is awesome: https://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+[*]
+end_of_line = lf
+charset = utf-8
+
+[*.{rs,py}]
+indent_style = space
+indent_size = 4
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[Makefile]
+indent_style = tab
.env.template
@@ -1,4 +1,4 @@
-## Bitwarden_RS Configuration File
+## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
 ##
 ## Be aware that most of these settings will be overridden if they were changed
@@ -28,6 +28,7 @@
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
 # ATTACHMENTS_FOLDER=data/attachments
+# SENDS_FOLDER=data/sends
 
 ## Templates data folder, by default uses embedded templates
 ## Check source code to see the format
@@ -35,9 +36,9 @@
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
 
-## Client IP Header, used to identify the IP of the client, defaults to "X-Client-IP"
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
 ## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Client-IP
+# IP_HEADER=X-Real-IP
 
 ## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
 # ICON_CACHE_TTL=2592000
@@ -55,6 +56,28 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012
 
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## Job scheduler settings
+##
+## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
+## and are always in terms of UTC time (regardless of your local time zone settings).
+##
+## How often (in ms) the job scheduler thread checks for jobs that need running.
+## Set to 0 to globally disable scheduled jobs.
+# JOB_POLL_INTERVAL_MS=30000
+##
+## Cron schedule of the job that checks for Sends past their deletion date.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# SEND_PURGE_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that checks for trashed items to delete permanently.
+## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
+# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
+
 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
 
@@ -81,9 +104,9 @@
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents bitwarden_rs from automatically enabling it on start.
+## this setting only prevents vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
 
 ## Database connection retries
@@ -104,7 +127,8 @@
 ## Icon blacklist Regex
 ## Any domains or IPs that match this regex won't be fetched by the icon service.
 ## Useful to hide other servers in the local network. Check the WIKI for more details
-# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
+## NOTE: Always enclose this regex withing single quotes!
+# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
 
 ## Any IP which is not defined as a global IP will be blacklisted.
 ## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
@@ -168,22 +192,30 @@
 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
 ## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Bitwarden_RS
+# INVITATION_ORG_NAME=Vaultwarden
 
-## Per-organization attachment limit (KB)
-## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
 # ORG_ATTACHMENT_LIMIT=
-## Per-user attachment limit (KB).
-## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
 # USER_ATTACHMENT_LIMIT=
 
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
 ## Controls the PBBKDF password iterations to apply on the server
 ## The change only applies when the password is changed
 # PASSWORD_ITERATIONS=100000
 
-## Whether password hint should be sent into the error response when the client request it
-# SHOW_PASSWORD_HINT=true
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false
 
 ## Domain settings
 ## The domain must match the address from where you access the server
@@ -228,23 +260,24 @@
 ## You can disable this, so that only the current TOTP Code is allowed.
 ## Keep in mind that when a sever drifts out of time, valid codes could be marked as invalid.
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
-# AUTHENTICATOR_DISABLE_TIME_DRIFT = false
+# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
 
-## Rocket specific settings, check Rocket documentation to learn more
-# ROCKET_ENV=staging
-# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
-# ROCKET_PORT=8000
+## Rocket specific settings
+## See https://rocket.rs/v0.4/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
+# ROCKET_WORKERS=10
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
 ## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
-# SMTP_FROM=bitwarden-rs@domain.tld
-# SMTP_FROM_NAME=Bitwarden_RS
-# SMTP_PORT=587
-# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true.
-# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work.
+# SMTP_FROM=vaultwarden@domain.tld
+# SMTP_FROM_NAME=Vaultwarden
+# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
+# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
+# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
@@ -259,6 +292,22 @@
 ## but might need to be changed in case it trips some anti-spam filters
 # HELO_NAME=
 
+## SMTP debugging
+## When set to true this will output very detailed SMTP messages.
+## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
+# SMTP_DEBUG=false
+
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+## Accept Invalid Certificates
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
+# SMTP_ACCEPT_INVALID_CERTS=false
+
 ## Require new device emails. When a user logs in an email is required to be sent.
 ## If sending the email fails the login attempt will fail!!
 # REQUIRE_DEVICE_EMAIL=false
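The job schedules added above use the seconds-resolution cron syntax of the `cron` crate linked in the template comments. As a minimal illustrative sketch only (not part of this diff), assuming the `cron` and `chrono` crates are available as dependencies, a schedule string such as the SEND_PURGE_SCHEDULE default can be parsed and inspected like this:

```rust
// Illustrative only; not part of this diff. Assumes the `cron` and `chrono`
// crates, matching the crates.io link referenced in the comments above.
use std::str::FromStr;

use chrono::Utc;
use cron::Schedule;

fn main() {
    // Seconds-resolution cron expression: sec min hour day-of-month month day-of-week.
    // "0 5 * * * *" fires at five minutes past every hour, evaluated in UTC.
    let schedule = Schedule::from_str("0 5 * * * *").expect("invalid cron expression");

    // Print the next three times a job on this schedule would run.
    for next in schedule.upcoming(Utc).take(3) {
        println!("next run: {}", next);
    }
}
```

This is a quick way to sanity-check a custom schedule before putting it in the configuration file.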
.gitattributes (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
+# Ignore vendored scripts in GitHub stats
+src/static/scripts/* linguist-vendored
+

.github/ISSUE_TEMPLATE/bug_report.md (vendored, 60 lines changed)
@@ -1,42 +1,66 @@
 ---
 name: Bug report
-about: Create a report to help us improve
+about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
 title: ''
 labels: ''
 assignees: ''
 
 ---
+<!--
+# ###
+NOTE: Please update to the latest version of vaultwarden before reporting an issue!
+This saves you and us a lot of time and troubleshooting.
+See:
+* https://github.com/dani-garcia/vaultwarden/issues/1180
+* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
+# ###
+-->
+
 <!--
 Please fill out the following template to make solving your problem easier and faster for us.
-This is only a guideline. If you think that parts are unneccessary for your issue, feel free to remove them.
+This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
 
-Remember to hide/obfuscate personal and confidential information,
-such as names, global IP/DNS adresses and especially passwords, if neccessary.
+Remember to hide/redact personal or confidential information,
+such as passwords, IP addresses, and DNS names as appropriate.
 -->
 
 ### Subject of the issue
-<!-- Describe your issue here.-->
+<!-- Describe your issue here. -->
 
+### Deployment environment
+
+<!--
+=========================================================================================
+Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
+That will auto-generate most of the info requested in this section.
+=========================================================================================
+-->
+
+<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
+<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
+<!-- Remember to check if your issue exists on the latest version first! -->
+* vaultwarden version:
+
+<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
+* Install method:
+
+* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
+
-### Your environment
-<!-- The version number, obtained from the logs or the admin page -->
-* Bitwarden_rs version:
-<!-- How the server was installed: Docker image / package / built from source -->
-* Install method:
-* Clients used: <!-- if applicable -->
 * Reverse proxy and version: <!-- if applicable -->
-* Version of mysql/postgresql: <!-- if applicable -->
-* Other relevant information:
+* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
+
+* Other relevant details:
 
 ### Steps to reproduce
 <!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start bitwarden_rs? -->
+and how did you start vaultwarden? -->
 
 ### Expected behaviour
-<!-- Tell us what should happen -->
+<!-- Tell us what you expected to happen -->
 
 ### Actual behaviour
-<!-- Tell us what happens instead -->
+<!-- Tell us what actually happened -->
 
-### Relevant logs
-<!-- Share some logfiles, screenshots or output of relevant programs with us. -->
+### Troubleshooting data
+<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Discourse forum for vaultwarden
+    url: https://vaultwarden.discourse.group/
+    about: Use this forum to request features or get help with usage/configuration.
+  - name: GitHub Discussions for vaultwarden
+    url: https://github.com/dani-garcia/vaultwarden/discussions
+    about: An alternative to the Discourse forum, if this is easier for you.

.github/ISSUE_TEMPLATE/feature_request.md (vendored, deleted, 11 lines)
@@ -1,11 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: better for forum
-assignees: ''
-
----
-
-# Please submit all your feature requests to the forum
-Link: https://bitwardenrs.discourse.group/c/feature-requests

(deleted issue template, 11 lines)
@@ -1,11 +0,0 @@
----
-name: Help with installation/configuration
-about: Any questions about the setup of bitwarden_rs
-title: ''
-labels: better for forum
-assignees: ''
-
----
-
-# Please submit all your third party help requests to the forum
-Link: https://bitwardenrs.discourse.group/c/help

(deleted issue template, 11 lines)
@@ -1,11 +0,0 @@
----
-name: Help with proxy/database/NAS setup
-about: Any questions about third party software
-title: ''
-labels: better for forum
-assignees: ''
-
----
-
-# Please submit all your third party help requests to the forum
-Link: https://bitwardenrs.discourse.group/c/third-party-help
.github/security-contact.gif (vendored, new binary file, 2.3 KiB)
Binary file not shown.
.github/workflows/build.yml (vendored, new file, 181 lines)
@@ -0,0 +1,181 @@
+name: Build
+
+on:
+  push:
+    paths-ignore:
+      - "*.md"
+      - "*.txt"
+      - ".dockerignore"
+      - ".env.template"
+      - ".gitattributes"
+      - ".gitignore"
+      - "azure-pipelines.yml"
+      - "docker/**"
+      - "hooks/**"
+      - "tools/**"
+      - ".github/FUNDING.yml"
+      - ".github/ISSUE_TEMPLATE/**"
+      - ".github/security-contact.gif"
+  pull_request:
+    # Ignore when there are only changes done too one of these paths
+    paths-ignore:
+      - "*.md"
+      - "*.txt"
+      - ".dockerignore"
+      - ".env.template"
+      - ".gitattributes"
+      - ".gitignore"
+      - "azure-pipelines.yml"
+      - "docker/**"
+      - "hooks/**"
+      - "tools/**"
+      - ".github/FUNDING.yml"
+      - ".github/ISSUE_TEMPLATE/**"
+      - ".github/security-contact.gif"
+
+jobs:
+  build:
+    # Make warnings errors, this is to prevent warnings slipping through.
+    # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
+    env:
+      RUSTFLAGS: "-D warnings"
+    strategy:
+      fail-fast: false
+      matrix:
+        channel:
+          - nightly
+          # - stable
+        target-triple:
+          - x86_64-unknown-linux-gnu
+          # - x86_64-unknown-linux-musl
+        include:
+          - target-triple: x86_64-unknown-linux-gnu
+            host-triple: x86_64-unknown-linux-gnu
+            features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features
+            channel: nightly
+            os: ubuntu-18.04
+            ext: ""
+          # - target-triple: x86_64-unknown-linux-gnu
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,mysql,postgresql"
+          #   channel: stable
+          #   os: ubuntu-18.04
+          #   ext: ""
+
+    name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
+    runs-on: ${{ matrix.os }}
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@v2
+      # End Checkout the repo
+
+      # Install musl-tools when needed
+      - name: Install musl tools
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
+        if: matrix.target-triple == 'x86_64-unknown-linux-musl'
+      # End Install musl-tools when needed
+
+      # Install dependencies
+      - name: Install dependencies Ubuntu
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
+        if: startsWith( matrix.os, 'ubuntu' )
+      # End Install dependencies
+
+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@v1
+      # End Enable Rust Caching
+
+      # Uses the rust-toolchain file to determine version
+      - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          target: ${{ matrix.target-triple }}
+          components: clippy, rustfmt
+      # End Uses the rust-toolchain file to determine version
+
+      # Run cargo tests (In release mode to speed up future builds)
+      # First test all features together, afterwards test them separately.
+      - name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
+      # Test single features
+      # 0: sqlite
+      - name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
+        if: ${{ matrix.features[0] != '' }}
+      # 1: mysql
+      - name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
+        if: ${{ matrix.features[1] != '' }}
+      # 2: postgresql
+      - name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
+        if: ${{ matrix.features[2] != '' }}
+      # End Run cargo tests
+
+      # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+      - name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
+      # End Run cargo clippy
+
+      # Run cargo fmt
+      - name: '`cargo fmt`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: fmt
+          args: --all -- --check
+      # End Run cargo fmt
+
+      # Build the binary
+      - name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
+      # End Build the binary
+
+      # Upload artifact to Github Actions
+      - name: Upload artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+          path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
+      # End Upload artifact to Github Actions
+
+      ## This is not used at the moment
+      ## We could start using this when we can build static binaries
+      # Upload to github actions release
+      # - name: Release
+      #   uses: Shopify/upload-to-release@1
+      #   if: startsWith(github.ref, 'refs/tags/')
+      #   with:
+      #     name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+      #     path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
+      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
+      # End Upload to github actions release
.github/workflows/hadolint.yml (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
+name: Hadolint
+
+on:
+  push:
+    # Ignore when there are only changes done too one of these paths
+    paths:
+      - "docker/**"
+  pull_request:
+    # Ignore when there are only changes done too one of these paths
+    paths:
+      - "docker/**"
+
+jobs:
+  hadolint:
+    name: Validate Dockerfile syntax
+    runs-on: ubuntu-20.04
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@v2
+      # End Checkout the repo
+
+      # Download hadolint
+      - name: Download hadolint
+        shell: bash
+        run: |
+          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
+          sudo chmod +x /usr/local/bin/hadolint
+        env:
+          HADOLINT_VERSION: 2.5.0
+      # End Download hadolint
+
+      # Test Dockerfiles
+      - name: Run hadolint
+        shell: bash
+        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
+      # End Test Dockerfiles
.github/workflows/workspace.yml (vendored, deleted, 148 lines)
@@ -1,148 +0,0 @@
-name: Workflow
-
-on:
-  push:
-    paths-ignore:
-      - "**.md"
-  #pull_request:
-  #  paths-ignore:
-  #    - "**.md"
-
-jobs:
-  build:
-    name: Build
-    strategy:
-      fail-fast: false
-      matrix:
-        db-backend: [sqlite, mysql, postgresql]
-        target:
-          - x86_64-unknown-linux-gnu
-          # - x86_64-unknown-linux-musl
-          # - x86_64-apple-darwin
-          # - x86_64-pc-windows-msvc
-        include:
-          - target: x86_64-unknown-linux-gnu
-            os: ubuntu-latest
-            ext:
-          # - target: x86_64-unknown-linux-musl
-          #   os: ubuntu-latest
-          #   ext:
-          # - target: x86_64-apple-darwin
-          #   os: macOS-latest
-          #   ext:
-          # - target: x86_64-pc-windows-msvc
-          #   os: windows-latest
-          #   ext: .exe
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v1
-
-      # - name: Cache choco cache
-      #   uses: actions/cache@v1.0.3
-      #   if: matrix.os == 'windows-latest'
-      #   with:
-      #     path: ~\AppData\Local\Temp\chocolatey
-      #     key: ${{ runner.os }}-choco-cache-${{ matrix.db-backend }}
-
-      - name: Cache vcpkg installed
-        uses: actions/cache@v1.0.3
-        if: matrix.os == 'windows-latest'
-        with:
-          path: $VCPKG_ROOT/installed
-          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
-        env:
-          VCPKG_ROOT: 'C:\vcpkg'
-
-      - name: Cache vcpkg downloads
-        uses: actions/cache@v1.0.3
-        if: matrix.os == 'windows-latest'
-        with:
-          path: $VCPKG_ROOT/downloads
-          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
-        env:
-          VCPKG_ROOT: 'C:\vcpkg'
-
-      # - name: Cache homebrew
-      #   uses: actions/cache@v1.0.3
-      #   if: matrix.os == 'macOS-latest'
-      #   with:
-      #     path: ~/Library/Caches/Homebrew
-      #     key: ${{ runner.os }}-brew-cache
-
-      # - name: Cache apt
-      #   uses: actions/cache@v1.0.3
-      #   if: matrix.os == 'ubuntu-latest'
-      #   with:
-      #     path: /var/cache/apt/archives
-      #     key: ${{ runner.os }}-apt-cache
-
-      # Install dependencies
-      - name: Install dependencies macOS
-        run: brew update; brew install openssl sqlite libpq mysql
-        if: matrix.os == 'macOS-latest'
-
-      - name: Install dependencies Ubuntu
-        run: sudo apt-get update && sudo apt-get install --no-install-recommends openssl sqlite libpq-dev libmysql++-dev
-        if: matrix.os == 'ubuntu-latest'
-
-      - name: Install dependencies Windows
-        run: vcpkg integrate install; vcpkg install sqlite3:x64-windows openssl:x64-windows libpq:x64-windows libmysql:x64-windows
-        if: matrix.os == 'windows-latest'
-        env:
-          VCPKG_ROOT: 'C:\vcpkg'
-      # End Install dependencies
-
-      # Install rust nightly toolchain
-      - name: Cache cargo registry
-        uses: actions/cache@v1.0.3
-        with:
-          path: ~/.cargo/registry
-          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
-      - name: Cache cargo index
-        uses: actions/cache@v1.0.3
-        with:
-          path: ~/.cargo/git
-          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
-      - name: Cache cargo build
-        uses: actions/cache@v1.0.3
-        with:
-          path: target
-          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
-
-      - name: Install latest nightly
-        uses: actions-rs/toolchain@v1.0.5
-        with:
-          # Uses rust-toolchain to determine version
-          profile: minimal
-          target: ${{ matrix.target }}
-
-      # Build
-      - name: Build Win
-        if: matrix.os == 'windows-latest'
-        run: cargo.exe build --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
-        env:
-          RUSTFLAGS: -Ctarget-feature=+crt-static
-          VCPKG_ROOT: 'C:\vcpkg'
-
-      - name: Build macOS / Ubuntu
-        if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
-        run: cargo build --verbose --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
-
-      # Test
-      - name: Run tests
-        run: cargo test --features ${{ matrix.db-backend }}
-
-      # Upload & Release
-      - name: Upload artifact
-        uses: actions/upload-artifact@v1.0.0
-        with:
-          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
-          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
-
-      - name: Release
-        uses: Shopify/upload-to-release@1.0.0
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
-          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
.pre-commit-config.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
+---
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.0.1
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: check-toml
+      - id: end-of-file-fixer
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: detect-private-key
+  - repo: local
+    hooks:
+      - id: fmt
+        name: fmt
+        description: Format files with cargo fmt.
+        entry: cargo fmt
+        language: system
+        types: [rust]
+        args: ["--", "--check"]
+      - id: cargo-test
+        name: cargo test
+        description: Test the package for errors.
+        entry: cargo test
+        language: system
+        args: ["--features", "sqlite,mysql,postgresql", "--"]
+        types: [rust]
+        pass_filenames: false
+      - id: cargo-clippy
+        name: cargo clippy
+        description: Lint Rust sources
+        entry: cargo clippy
+        language: system
+        args: ["--features", "sqlite,mysql,postgresql", "--", "-D", "warnings"]
+        types: [rust]
+        pass_filenames: false
.travis.yml (deleted, 21 lines)
@@ -1,21 +0,0 @@
-dist: xenial
-
-env:
-  global:
-    - HADOLINT_VERSION=1.17.1
-
-language: rust
-rust: nightly
-cache: cargo
-
-before_install:
-  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
-  - sudo chmod +rx /usr/local/bin/hadolint
-  - rustup set profile minimal
-
-# Nothing to install
-install: true
-script:
-  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
-  - cargo test --features "sqlite"
-  - cargo test --features "mysql"
Cargo.lock (generated, 1930 lines changed)
File diff suppressed because it is too large.
Cargo.toml (91 lines changed)
@@ -1,10 +1,10 @@
 [package]
-name = "bitwarden_rs"
+name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2018"
 
-repository = "https://github.com/dani-garcia/bitwarden_rs"
+repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
 license = "GPL-3.0-only"
 publish = false
@@ -28,110 +28,127 @@ syslog = "4.0.1"
 
 [dependencies]
 # Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
-rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
-rocket_contrib = "0.5.0-dev"
+rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false }
+rocket_contrib = "=0.5.0-dev"
 
 # HTTP client
-reqwest = { version = "0.10.8", features = ["blocking", "json"] }
+reqwest = { version = "0.11.4", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies"] }
+
+# Used for custom short lived cookie jar
+cookie = "0.15.1"
+cookie_store = "0.15.0"
+bytes = "1.0.1"
+url = "2.2.2"
 
 # multipart/form-data support
-multipart = { version = "0.17.0", features = ["server"], default-features = false }
+multipart = { version = "0.18.0", features = ["server"], default-features = false }
 
 # WebSockets library
-ws = "0.9.1"
+ws = { version = "0.11.0", package = "parity-ws" }
 
 # MessagePack library
-rmpv = "0.4.5"
+rmpv = "0.4.7"
 
 # Concurrent hashmap implementation
 chashmap = "2.2.2"
 
 # A generic serialization/deserialization framework
-serde = "1.0.115"
-serde_derive = "1.0.115"
-serde_json = "1.0.57"
+serde = { version = "1.0.126", features = ["derive"] }
+serde_json = "1.0.64"
 
 # Logging
-log = "0.4.11"
+log = "0.4.14"
 fern = { version = "0.6.0", features = ["syslog-4"] }
 
 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.5", features = [ "chrono", "r2d2"] }
+diesel = { version = "1.4.7", features = [ "chrono", "r2d2"] }
 diesel_migrations = "1.4.0"
 
 # Bundled SQLite
-libsqlite3-sys = { version = "0.18.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }
 
 # Crypto-related libraries
-rand = "0.7.3"
-ring = "0.16.15"
+rand = "0.8.4"
+ring = "0.16.20"
 
 # UUID generation
-uuid = { version = "0.8.1", features = ["v4"] }
+uuid = { version = "0.8.2", features = ["v4"] }
 
 # Date and time libraries
-chrono = "0.4.15"
+chrono = { version = "0.4.19", features = ["serde"] }
 chrono-tz = "0.5.3"
-time = "0.2.18"
+time = "0.2.27"
+
+# Job scheduler
+job_scheduler = "1.2.1"
 
 # TOTP library
 oath = "0.10.2"
 
 # Data encoding library
-data-encoding = "2.3.0"
+data-encoding = "2.3.2"
 
 # JWT library
 jsonwebtoken = "7.2.0"
 
 # U2F library
 u2f = "0.2.0"
+webauthn-rs = "=0.3.0-alpha.9"
 
 # Yubico Library
-yubico = { version = "0.9.1", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
 
 # A `dotenv` implementation for Rust
 dotenv = { version = "0.15.0", default-features = false }
 
 # Lazy initialization
-once_cell = "1.4.1"
+once_cell = "1.8.0"
 
 # Numerical libraries
-num-traits = "0.2.12"
-num-derive = "0.3.2"
+num-traits = "0.2.14"
+num-derive = "0.3.3"
 
 # Email libraries
-lettre = { version = "0.10.0-alpha.2", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname"], default-features = false }
-newline-converter = "0.1.0"
+tracing = { version = "0.1.26", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
+lettre = { version = "0.10.0-rc.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
 
 # Template library
-handlebars = { version = "3.4.0", features = ["dir_source"] }
+handlebars = { version = "4.1.0", features = ["dir_source"] }
 
 # For favicon extraction from main website
-soup = "0.5.0"
-regex = "1.3.9"
+html5ever = "0.25.1"
+markup5ever_rcdom = "0.1.0"
+regex = { version = "1.5.4", features = ["std", "perf"], default-features = false }
 data-url = "0.1.0"
 
 # Used by U2F, JWT and Postgres
-openssl = "0.10.30"
+openssl = "0.10.35"
 
 # URL encoding library
 percent-encoding = "2.1.0"
 # Punycode conversion
-idna = "0.2.0"
+idna = "0.2.3"
 
 # CLI argument parsing
-structopt = "0.3.17"
+pico-args = "0.4.2"
 
 # Logging panics to logfile instead stderr only
-backtrace = "0.3.50"
+backtrace = "0.3.60"
 
 # Macro ident concatenation
-paste = "1.0.0"
+paste = "1.0.5"
 
 [patch.crates-io]
 # Use newest ring
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '1010f6a2a88fac899dec0cd2f642156908038a53' }
-rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '1010f6a2a88fac899dec0cd2f642156908038a53' }
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
+rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
 
 # For favicon extraction from main website
-data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' }
+data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = 'eb7330b5296c0d43816d1346211b74182bb4ae37' }
+
+# The maintainer of the `job_scheduler` crate doesn't seem to have responded
+# to any issues or PRs for almost a year (as of April 2021). This hopefully
+# temporary fork updates Cargo.toml to use more up-to-date dependencies.
+# In particular, `cron` has since implemented parsing of some common syntax
+# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
+job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
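The newly added `job_scheduler` dependency (and the patched fork of it under [patch.crates-io]) is what drives the purge jobs configured in .env.template. The following is an illustrative sketch only, not code taken from this repository, showing how a recurring job could be registered with that crate, reusing the SEND_PURGE_SCHEDULE and JOB_POLL_INTERVAL_MS defaults from the template:

```rust
// Illustrative only; not taken from the vaultwarden sources.
// Assumes the `job_scheduler` crate added in Cargo.toml above.
use std::{thread, time::Duration};

use job_scheduler::{Job, JobScheduler};

fn main() {
    let mut scheduler = JobScheduler::new();

    // "0 5 * * * *" is the SEND_PURGE_SCHEDULE default: five minutes past every hour.
    scheduler.add(Job::new("0 5 * * * *".parse().unwrap(), || {
        // Placeholder body; the real job would purge Sends past their deletion date.
        println!("running send purge job");
    }));

    loop {
        // Run any jobs that are due, then sleep for the poll interval
        // (30 seconds, the JOB_POLL_INTERVAL_MS default).
        scheduler.tick();
        thread::sleep(Duration::from_millis(30_000));
    }
}
```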
65
README.md
65
README.md
@@ -1,15 +1,16 @@
|
|||||||
### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
|
### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
|
||||||
|
|
||||||
|
📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
[](https://travis-ci.org/dani-garcia/bitwarden_rs)
|
[](https://hub.docker.com/r/vaultwarden/server)
|
||||||
[](https://hub.docker.com/r/bitwardenrs/server)
|
[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
|
||||||
[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
|
[](https://github.com/dani-garcia/vaultwarden/releases/latest)
|
||||||
[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
|
[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
|
||||||
[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
|
[](https://matrix.to/#/#vaultwarden:matrix.org)
|
||||||
[](https://matrix.to/#/#bitwarden_rs:matrix.org)
|
|
||||||
|
|
||||||
Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
|
Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
|
||||||
|
|
||||||
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
|
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
|
||||||
|
|
||||||
@@ -33,29 +34,57 @@ Basically full implementation of Bitwarden API is provided including:
|
|||||||
Pull the docker image and mount a volume from the host for persistent storage:
|
Pull the docker image and mount a volume from the host for persistent storage:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker pull bitwardenrs/server:latest
|
docker pull vaultwarden/server:latest
|
||||||
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
|
docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
|
||||||
```
|
```
|
||||||
This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.
|
This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
|
||||||
|
|
||||||
**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
|
**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
|
||||||
|
|
||||||
This can be configured in [bitwarden_rs directly](https://github.com/dani-garcia/bitwarden_rs/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/bitwarden_rs/wiki/Proxy-examples)).
|
This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
|
||||||
|
|
||||||
If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
|
If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
|
||||||

## Usage

-See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
+See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

## Get in touch

-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://bitwardenrs.discourse.group/).
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).

-If you spot any bugs or crashes with bitwarden_rs itself, please [create an issue](https://github.com/dani-garcia/bitwarden_rs/issues/). Make sure there aren't any similar issues open, though!
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!

-If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
+If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!

### Sponsors

Thanks for your contribution to the project!

-- [@ChonoN](https://github.com/ChonoN)
-- [@themightychris](https://github.com/themightychris)
+<table>
+<tr>
+<td align="center">
+<a href="https://github.com/netdadaltd">
+<img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
+<br />
+<sub><b>netDada Ltd.</b></sub>
+</a>
+</td>
+</tr>
+</table>
+
+<br/>
+
+<table>
+<tr>
+<td align="center">
+<a href="https://github.com/Gyarbij" style="width: 75px">
+<sub><b>Chono N</b></sub>
+</a>
+</td>
+</tr>
+<tr>
+<td align="center">
+<a href="https://github.com/themightychris">
+<sub><b>Chris Alfano</b></sub>
+</a>
+</td>
+</tr>
+</table>

SECURITY.md (new file, 45 lines)
@@ -0,0 +1,45 @@

Vaultwarden tries to prevent security issues but there could always slip something through.
If you believe you've found a security issue in our application, we encourage you to
notify us. We welcome working with you to resolve the issue promptly. Thanks in advance!

# Disclosure Policy

- Let us know as soon as possible upon discovery of a potential security issue, and we'll make every
effort to quickly resolve the issue.
- Provide us a reasonable amount of time to resolve the issue before any disclosure to the public or a
third-party. We may publicly disclose the issue before resolving it, if appropriate.
- Make a good faith effort to avoid privacy violations, destruction of data, and interruption or
degradation of our service. Only interact with accounts you own or with explicit permission of the
account holder.

# In-scope

- Security issues in any current release of Vaultwarden. Source code is available at https://github.com/dani-garcia/vaultwarden. This includes the current `latest` release and `main / testing` release.

# Exclusions

The following bug classes are out-of scope:

- Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues)
- Bugs that are not part of Vaultwarden, like on the the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
- Issues in an upstream software dependency (ex: Rust, or External Libraries) which are already reported to the upstream maintainer
- Attacks requiring physical access to a user's device
- Issues related to software or protocols not under Vaultwarden's control
- Vulnerabilities in outdated versions of Vaultwarden
- Missing security best practices that do not directly lead to a vulnerability (You may still report them as a normal issue)
- Issues that do not have any impact on the general public

While researching, we'd like to ask you to refrain from:

- Denial of service
- Spamming
- Social engineering (including phishing) of Vaultwarden developers, contributors or users

Thank you for helping keep Vaultwarden and our users safe!

# How to contact us

- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
- You can send an email to report a security issue.
- If you want to send an encrypted email you can use the following GPG key:<br>
https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index
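If you do want to encrypt a report to that key, the usual GPG workflow looks roughly like this; the report filename is only a placeholder:

```sh
# Fetch the public key from the keyserver, then encrypt the report to it.
gpg --keyserver keyserver.ubuntu.com --recv-keys B9B7A108373276BF3C0406F9FC8A7D14C3CD543A
gpg --armor --encrypt --recipient B9B7A108373276BF3C0406F9FC8A7D14C3CD543A report.txt
```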

@@ -1,25 +0,0 @@
pool:
  vmImage: 'Ubuntu-16.04'

steps:
- script: |
    ls -la
    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain) --profile=minimal
    echo "##vso[task.prependpath]$HOME/.cargo/bin"
  displayName: 'Install Rust'

- script: |
    sudo apt-get update
    sudo apt-get install -y libmysql++-dev
  displayName: Install libmysql

- script: |
    rustc -Vv
    cargo -V
  displayName: Query rust and cargo versions

- script : cargo test --features "sqlite"
  displayName: 'Test project with sqlite backend'

- script : cargo test --features "mysql"
  displayName: 'Test project with mysql backend'

build.rs
@@ -1,7 +1,7 @@
-use std::process::Command;
use std::env;
+use std::process::Command;

fn main() {
    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
    #[cfg(feature = "sqlite")]
    println!("cargo:rustc-cfg=sqlite");
@@ -11,8 +11,10 @@ fn main() {
    println!("cargo:rustc-cfg=postgresql");

    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
-    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
+    compile_error!(
+        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
+    );

    if let Ok(version) = env::var("BWRS_VERSION") {
        println!("cargo:rustc-env=BWRS_VERSION={}", version);
        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
@@ -56,12 +58,12 @@ fn read_git_info() -> Result<(), std::io::Error> {
    // Combined version
    let version = if let Some(exact) = exact_tag {
        exact
-    } else if &branch != "master" {
+    } else if &branch != "main" && &branch != "master" {
        format!("{}-{} ({})", last_tag, rev_short, branch)
    } else {
        format!("{}-{}", last_tag, rev_short)
    };

    println!("cargo:rustc-env=BWRS_VERSION={}", version);
    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
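Since build.rs only consumes `BWRS_VERSION` when the variable is present, the reported version can be overridden at build time; a minimal sketch (the version string is only an example value):

```sh
# Bake a custom version string into the binary (example value).
BWRS_VERSION="1.22.0-custom" cargo build --release --features sqlite
```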

docker/Dockerfile.buildx (new file, 33 lines)
@@ -0,0 +1,33 @@
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
# amd64
#
# Recent versions of Docker have started printing a warning when the image's
# claimed arch doesn't match the host arch. For example:
#
# WARNING: The requested image's platform (linux/amd64) does not match the
# detected host platform (linux/arm/v7) and no specific platform was requested
#
# The image still works fine, but the spurious warning creates confusion.
#
# Docker doesn't seem to provide a way to directly set the arch of an image
# at build time. To resolve the build vs. target arch discrepancy, we use
# Docker Buildx to build a new set of images with the correct target arch.
#
# Docker Buildx uses this Dockerfile to build an image for each requested
# platform. Since the Dockerfile basically consists of a single `FROM`
# instruction, we're effectively telling Buildx to build a platform-specific
# image by simply copying the existing cross-built image and setting the
# correct target arch as a side effect.
#
# References:
#
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
#
ARG LOCAL_REPO
ARG DOCKER_TAG
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
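As a rough illustration of how this file gets used, a Buildx invocation along these lines re-tags an already cross-built image for a specific platform; the repository, tag, and platform values are placeholders, not the project's actual release pipeline:

```sh
# Placeholder values; adjust LOCAL_REPO/DOCKER_TAG and the platform as needed.
docker buildx build \
  --platform linux/arm/v7 \
  --build-arg LOCAL_REPO=vaultwarden/server \
  --build-arg DOCKER_TAG=testing \
  -f docker/Dockerfile.buildx \
  -t vaultwarden/server:testing-armv7 \
  .
```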

@@ -1,64 +1,90 @@
# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

-{% set build_stage_base_image = "rust:1.46" %}
+{% set build_stage_base_image = "rust:1.53" %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
-{% set build_stage_base_image = "clux/muslrust:nightly-2020-10-02" %}
+{% set build_stage_base_image = "clux/muslrust:nightly-2021-06-24" %}
-{% set runtime_stage_base_image = "alpine:3.12" %}
+{% set runtime_stage_base_image = "alpine:3.14" %}
-{% set package_arch_name = "" %}
+{% set package_arch_target = "x86_64-unknown-linux-musl" %}
-{% elif "arm32v7" in target_file %}
+{% elif "armv7" in target_file %}
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.12" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.14" %}
-{% set package_arch_name = "" %}
+{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% endif %}
{% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "debian:buster-slim" %}
-{% set package_arch_name = "" %}
-{% elif "arm64v8" in target_file %}
+{% elif "arm64" in target_file %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
{% set package_arch_name = "arm64" %}
-{% elif "arm32v6" in target_file %}
+{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
+{% set package_cross_compiler = "aarch64-linux-gnu" %}
+{% elif "armv6" in target_file %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
{% set package_arch_name = "armel" %}
-{% elif "arm32v7" in target_file %}
+{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
+{% set package_cross_compiler = "arm-linux-gnueabi" %}
+{% elif "armv7" in target_file %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
{% set package_arch_name = "armhf" %}
+{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
+{% set package_cross_compiler = "arm-linux-gnueabihf" %}
{% endif %}
-{% set package_arch_prefix = ":" + package_arch_name %}
+{% if package_arch_name is defined %}
-{% if package_arch_name == "" %}
+{% set package_arch_prefix = ":" + package_arch_name %}
+{% else %}
{% set package_arch_prefix = "" %}
{% endif %}
+{% if package_arch_target is defined %}
+{% set package_arch_target_param = " --target=" + package_arch_target %}
+{% else %}
+{% set package_arch_target_param = "" %}
+{% endif %}
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
-{% set vault_image_hash = "sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303" %}
+{% set vault_version = "2.21.1" %}
-{% raw %}
+{% set vault_image_digest = "sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5" %}
-# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
-# It can be viewed in multiple ways:
+# Using the digest instead of the tag name provides better security,
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
+# as the digest of an image is immutable, whereas a tag name can later
-# - From the console, with the following commands:
+# be changed to point to a malicious image.
-# docker pull bitwardenrs/web-vault:v2.16.1
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
#
-# - To do the opposite, and get the tag from the hash, you can do:
+# To verify the current digest for a given tag name:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
-{% endraw %}
+# click the tag name to view the digest of the image it currently points to.
-FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
+# - From the command line:
+# $ docker pull vaultwarden/web-vault:v{{ vault_version }}
+# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
+# [vaultwarden/web-vault@{{ vault_image_digest }}]
+#
+# - Conversely, to get the tag name from the digest:
+# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
+# [vaultwarden/web-vault:v{{ vault_version }}]
+#
+FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault

########################## BUILD IMAGE ##########################
FROM {{ build_stage_base_image }} as build

{% if "alpine" in target_file %}
-# Alpine only works on SQlite
+{% if "amd64" in target_file %}
-ARG DB=sqlite
+# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
+ARG DB=sqlite,postgresql
+{% set features = "sqlite,postgresql" %}
+{% else %}
+# Alpine-based ARM (musl) only supports sqlite during compile time.
+# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
+ARG DB=sqlite,vendored_openssl
+{% set features = "sqlite" %}
+{% endif %}
{% else %}
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
+{% set features = "sqlite,mysql,postgresql" %}
{% endif %}

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

@@ -68,7 +94,9 @@ RUN rustup set profile minimal
{% if "alpine" in target_file %}
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
+{% if "armv7" in target_file %}
+ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
+{% endif %}
{% elif "arm" in target_file %}
# Install required build libs for {{ package_arch_name }} architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
@@ -83,46 +111,17 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
libpq5{{ package_arch_prefix }} \
libpq-dev \
libmariadb-dev{{ package_arch_prefix }} \
-libmariadb-dev-compat{{ package_arch_prefix }}
+libmariadb-dev-compat{{ package_arch_prefix }} \
+gcc-{{ package_cross_compiler }} \
+&& mkdir -p ~/.cargo \
+&& echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
+&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
+&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
+
+ENV CARGO_HOME "/root/.cargo"
+ENV USER "root"
{% endif -%}
-{% if "arm64v8" in target_file %}
-RUN apt-get update \
-&& apt-get install -y \
---no-install-recommends \
-gcc-aarch64-linux-gnu \
-&& mkdir -p ~/.cargo \
-&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
-&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-{% elif "arm32v6" in target_file %}
-RUN apt-get update \
-&& apt-get install -y \
---no-install-recommends \
-gcc-arm-linux-gnueabi \
-&& mkdir -p ~/.cargo \
-&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
-&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
-&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-{% elif "arm32v7" in target_file and "alpine" not in target_file %}
-RUN apt-get update \
-&& apt-get install -y \
---no-install-recommends \
-gcc-arm-linux-gnueabihf \
-&& mkdir -p ~/.cargo \
-&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
-&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-{% endif %}
{% if "amd64" in target_file and "alpine" not in target_file %}
# Install DB packages
RUN apt-get update && apt-get install -y \
@@ -148,72 +147,31 @@ COPY ./build.rs ./build.rs
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y libmariadb3:amd64 && \
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
-mkdir -pv /tmp/dpkg && \
+&& apt-get download libmariadb-dev-compat:amd64 \
-cd /tmp/dpkg && \
+&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
-apt-get download libmariadb-dev-compat:amd64 && \
+&& rm -rvf ./libmariadb-dev-compat*.deb \
-dpkg --force-all -i *.deb && \
+# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-rm -rf /tmp/dpkg
+# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+# Without this specific file the ld command will fail and compilation fails with it.
+&& ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
+
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
-# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+ENV CROSS_COMPILE="1"
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
-# Without this specific file the ld command will fail and compilation fails with it.
+ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% endif -%}
-{% if "arm64v8" in target_file %}
+{% endif %}
-RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
+{% if package_arch_target is defined %}
+RUN rustup target add {{ package_arch_target }}
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
-RUN rustup target add aarch64-unknown-linux-gnu
-{% elif "arm32v6" in target_file %}
-RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
-
-ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
-RUN rustup target add arm-unknown-linux-gnueabi
-{% elif "arm32v7" in target_file %}
-RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
-
-ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
-RUN rustup target add armv7-unknown-linux-gnueabihf
-{% endif -%}
-{% else -%}
-{% if "amd64" in target_file %}
-RUN rustup target add x86_64-unknown-linux-musl
-{% elif "arm32v7" in target_file %}
-RUN rustup target add armv7-unknown-linux-musleabihf
-{% endif %}
{% endif %}

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
-{% if "alpine" in target_file %}
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }} \
-{% if "amd64" in target_file %}
+&& find . -not -path "./target*" -delete
-RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
-{% elif "arm32v7" in target_file %}
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
-{% endif %}
-{% elif "alpine" not in target_file %}
-{% if "amd64" in target_file %}
-RUN cargo build --features ${DB} --release
-{% elif "arm64v8" in target_file %}
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-{% elif "arm32v6" in target_file %}
-RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-{% elif "arm32v7" in target_file %}
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
-{% endif %}
-{% endif %}
-RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -224,22 +182,11 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
-{% if "amd64" in target_file %}
+{% if "armv7" in target_file %}
-RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
+# hadolint ignore=DL3059
-{% elif "arm32v7" in target_file %}
+RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
-RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
-{% endif %}
-{% elif "alpine" not in target_file %}
-{% if "amd64" in target_file %}
-RUN cargo build --features ${DB} --release
-{% elif "arm64v8" in target_file %}
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-{% elif "arm32v6" in target_file %}
-RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-{% elif "arm32v7" in target_file %}
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
-{% endif %}
{% endif %}

@@ -256,67 +203,54 @@ ENV SSL_CERT_DIR=/etc/ssl/certs
{% endif %}

{% if "amd64" not in target_file %}
+# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

{% endif %}
-# Install needed libraries
+# Create data folder and Install needed libraries
+RUN mkdir /data \
{% if "alpine" in runtime_stage_base_image %}
-RUN apk add --no-cache \
+&& apk add --no-cache \
openssl \
curl \
-{% if "sqlite" in target_file %}
+dumb-init \
-sqlite \
+{% if "mysql" in features %}
-{% elif "mysql" in target_file %}
mariadb-connector-c \
-{% elif "postgresql" in target_file %}
+{% endif %}
+{% if "postgresql" in features %}
postgresql-libs \
{% endif %}
ca-certificates
{% else %}
-RUN apt-get update && apt-get install -y \
+&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
-sqlite3 \
+dumb-init \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*
{% endif %}
-{% if "alpine" in target_file and "arm32v7" in target_file %}
-RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit
-{% endif %}

-RUN mkdir /data
{% if "amd64" not in target_file %}
+# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

{% endif %}

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
+WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
-{% if "alpine" in target_file %}
+{% if package_arch_target is defined %}
-{% if "amd64" in target_file %}
+COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
+{% else %}
-{% elif "arm32v7" in target_file %}
+COPY --from=build /app/target/release/vaultwarden .
-COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
-{% endif %}
-{% elif "alpine" not in target_file %}
-{% if "arm64v8" in target_file %}
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
-{% elif "arm32v6" in target_file %}
-COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
-{% elif "arm32v7" in target_file %}
-COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
-{% else %}
-COPY --from=build app/target/release/bitwarden_rs .
-{% endif %}
{% endif %}

COPY docker/healthcheck.sh /healthcheck.sh
@@ -325,9 +259,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
-WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
-{% if "alpine" in target_file and "arm32v7" in target_file %}
-CMD ["catatonit", "/start.sh"]
-{% else %}
CMD ["/start.sh"]
-{% endif %}
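Because the per-database features are exposed through the `DB` build argument, a generated Dockerfile can be built with a narrower feature set without editing the template; a rough sketch (the Dockerfile path and image tag here are only examples):

```sh
# Example only: build an image with just the sqlite backend enabled.
docker build --build-arg DB=sqlite -t vaultwarden-local -f docker/amd64/Dockerfile .
```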

@@ -1,4 +1,4 @@
-OBJECTS := $(shell find -mindepth 2 -name 'Dockerfile*')
+OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')

all: $(OBJECTS)
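Per the note at the top of the generated files, edits belong in `Dockerfile.j2`; running `make` from the directory containing this Makefile then regenerates the per-architecture Dockerfiles. A sketch, assuming the template and Makefile live side by side in the `docker/` directory:

```sh
# Regenerate the individual Dockerfiles after editing Dockerfile.j2 (assumed layout).
cd docker && make
```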

@@ -1,24 +1,31 @@
# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
-# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
+# Using the digest instead of the tag name provides better security,
-# It can be viewed in multiple ways:
+# as the digest of an image is immutable, whereas a tag name can later
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
+# be changed to point to a malicious image.
-# - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.16.1
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
#
-# - To do the opposite, and get the tag from the hash, you can do:
+# To verify the current digest for a given tag name:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
-FROM bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303 as vault
+# click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+# $ docker pull vaultwarden/web-vault:v2.21.1
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
+#
+# - Conversely, to get the tag name from the digest:
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+# [vaultwarden/web-vault:v2.21.1]
+#
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

########################## BUILD IMAGE ##########################
-FROM rust:1.46 as build
+FROM rust:1.53 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -49,8 +56,8 @@ COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release \
-RUN find . -not -path "./target*" -delete
+&& find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -72,27 +79,30 @@ ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
-sqlite3 \
+dumb-init \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*

-RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
+WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -100,6 +110,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
-WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -1,27 +1,34 @@
# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
-# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
+# Using the digest instead of the tag name provides better security,
-# It can be viewed in multiple ways:
+# as the digest of an image is immutable, whereas a tag name can later
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
+# be changed to point to a malicious image.
-# - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.16.1
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
#
-# - To do the opposite, and get the tag from the hash, you can do:
+# To verify the current digest for a given tag name:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
-FROM bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303 as vault
+# click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+# $ docker pull vaultwarden/web-vault:v2.21.1
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
+#
+# - Conversely, to get the tag name from the digest:
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+# [vaultwarden/web-vault:v2.21.1]
+#
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

########################## BUILD IMAGE ##########################
-FROM clux/muslrust:nightly-2020-10-02 as build
+FROM clux/muslrust:nightly-2021-06-24 as build

-# Alpine only works on SQlite
+# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
-ARG DB=sqlite
+ARG DB=sqlite,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,7 +39,6 @@ RUN rustup set profile minimal
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -47,8 +53,8 @@ RUN rustup target add x86_64-unknown-linux-musl
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
-RUN find . -not -path "./target*" -delete
+&& find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -64,29 +70,34 @@ RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
-FROM alpine:3.12
+FROM alpine:3.14

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

-# Install needed libraries
-RUN apk add --no-cache \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+&& apk add --no-cache \
openssl \
curl \
+dumb-init \
+postgresql-libs \
ca-certificates

-RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
+WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
+COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -94,6 +105,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
-WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,24 +1,31 @@
|
|||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
|
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||||
|
|
||||||
# Using multistage build:
|
# Using multistage build:
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
####################### VAULT BUILD IMAGE #######################
|
####################### VAULT BUILD IMAGE #######################
|
||||||
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
|
# Using the digest instead of the tag name provides better security,
|
||||||
# It can be viewed in multiple ways:
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
|
# be changed to point to a malicious image.
|
||||||
# - From the console, with the following commands:
|
|
||||||
# docker pull bitwardenrs/web-vault:v2.16.1
|
|
||||||
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
|
|
||||||
#
|
#
|
||||||
# - To do the opposite, and get the tag from the hash, you can do:
|
# To verify the current digest for a given tag name:
|
||||||
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
FROM bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303 as vault
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
|
# - From the command line:
|
||||||
|
# $ docker pull vaultwarden/web-vault:v2.21.1
|
||||||
|
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
|
||||||
|
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
|
||||||
|
#
|
||||||
|
# - Conversely, to get the tag name from the digest:
|
||||||
|
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
|
||||||
|
# [vaultwarden/web-vault:v2.21.1]
|
||||||
|
#
|
||||||
|
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
########################## BUILD IMAGE ##########################
|
||||||
FROM rust:1.46 as build
|
FROM rust:1.53 as build
|
||||||
|
|
||||||
# Debian-based builds support multidb
|
# Debian-based builds support multidb
|
||||||
ARG DB=sqlite,mysql,postgresql
|
ARG DB=sqlite,mysql,postgresql
|
||||||
@@ -42,11 +49,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
|
|||||||
libpq5:arm64 \
|
libpq5:arm64 \
|
||||||
libpq-dev \
|
libpq-dev \
|
||||||
libmariadb-dev:arm64 \
|
libmariadb-dev:arm64 \
|
||||||
libmariadb-dev-compat:arm64
|
libmariadb-dev-compat:arm64 \
|
||||||
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-aarch64-linux-gnu \
|
gcc-aarch64-linux-gnu \
|
||||||
&& mkdir -p ~/.cargo \
|
&& mkdir -p ~/.cargo \
|
||||||
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
|
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
|
||||||
@@ -70,18 +73,15 @@ COPY ./build.rs ./build.rs
|
|||||||
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
|
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
|
||||||
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
|
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
|
||||||
# What we can do is a force install, because nothing important is overlapping each other.
|
# What we can do is a force install, because nothing important is overlapping each other.
|
||||||
RUN apt-get install -y libmariadb3:amd64 && \
|
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
|
||||||
mkdir -pv /tmp/dpkg && \
|
&& apt-get download libmariadb-dev-compat:amd64 \
|
||||||
cd /tmp/dpkg && \
|
&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
|
||||||
apt-get download libmariadb-dev-compat:amd64 && \
|
&& rm -rvf ./libmariadb-dev-compat*.deb \
|
||||||
dpkg --force-all -i *.deb && \
|
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
|
||||||
rm -rf /tmp/dpkg
|
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
|
||||||
|
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
|
||||||
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
|
-# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
+    # Without this specific file the ld command will fail and compilation fails with it.
+    && ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
 
 ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
 ENV CROSS_COMPILE="1"
@@ -92,8 +92,8 @@ RUN rustup target add aarch64-unknown-linux-gnu
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -115,21 +115,22 @@ ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10
 
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
     --no-install-recommends \
     openssl \
     ca-certificates \
     curl \
-    sqlite3 \
+    dumb-init \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -138,9 +139,10 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
+WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
+COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -148,6 +150,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
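The ENV lines above only tell the cc crate which cross C compiler to use; cargo itself also needs a matching linker entry for the aarch64 target. A minimal sketch of that configuration, written in the same style as the `echo >> ~/.cargo/config` lines used for the 32-bit ARM targets in the diffs below (the aarch64 entry itself is an assumption, not shown in this diff):

    # Assumed cargo linker setup for the aarch64 target (illustrative only).
    mkdir -p ~/.cargo
    echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
    echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config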
@@ -1,24 +1,31 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#   docker pull bitwardenrs/web-vault:v2.16.1
-#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
-FROM bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303 as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.21.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+#     [vaultwarden/web-vault:v2.21.1]
+#
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.46 as build
+FROM rust:1.53 as build
 
 # Debian-based builds support multidb
 ARG DB=sqlite,mysql,postgresql
@@ -42,11 +49,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     libpq5:armel \
     libpq-dev \
     libmariadb-dev:armel \
-    libmariadb-dev-compat:armel
-
-RUN apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
+    libmariadb-dev-compat:armel \
     gcc-arm-linux-gnueabi \
     && mkdir -p ~/.cargo \
     && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
@@ -70,18 +73,15 @@ COPY ./build.rs ./build.rs
 # We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
 # We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
 # What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y libmariadb3:amd64 && \
-    mkdir -pv /tmp/dpkg && \
-    cd /tmp/dpkg && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i *.deb && \
-    rm -rf /tmp/dpkg
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
+    && apt-get download libmariadb-dev-compat:amd64 \
+    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
+    && rm -rvf ./libmariadb-dev-compat*.deb \
+    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+    # The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+    # Without this specific file the ld command will fail and compilation fails with it.
+    && ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
 
 ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
 ENV CROSS_COMPILE="1"
@@ -92,8 +92,8 @@ RUN rustup target add arm-unknown-linux-gnueabi
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -115,21 +115,22 @@ ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10
 
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
     --no-install-recommends \
     openssl \
    ca-certificates \
     curl \
-    sqlite3 \
+    dumb-init \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -138,9 +139,10 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
+WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
+COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -148,6 +150,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -1,24 +1,31 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#   docker pull bitwardenrs/web-vault:v2.16.1
-#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
-FROM bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303 as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.21.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+#     [vaultwarden/web-vault:v2.21.1]
+#
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.46 as build
+FROM rust:1.53 as build
 
 # Debian-based builds support multidb
 ARG DB=sqlite,mysql,postgresql
@@ -42,11 +49,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     libpq5:armhf \
     libpq-dev \
     libmariadb-dev:armhf \
-    libmariadb-dev-compat:armhf
-
-RUN apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
+    libmariadb-dev-compat:armhf \
     gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
     && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
@@ -70,18 +73,15 @@ COPY ./build.rs ./build.rs
 # We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
 # We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
 # What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y libmariadb3:amd64 && \
-    mkdir -pv /tmp/dpkg && \
-    cd /tmp/dpkg && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i *.deb && \
-    rm -rf /tmp/dpkg
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
+    && apt-get download libmariadb-dev-compat:amd64 \
+    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
+    && rm -rvf ./libmariadb-dev-compat*.deb \
+    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+    # The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+    # Without this specific file the ld command will fail and compilation fails with it.
+    && ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
 
 ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
 ENV CROSS_COMPILE="1"
@@ -92,8 +92,8 @@ RUN rustup target add armv7-unknown-linux-gnueabihf
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -115,21 +115,22 @@ ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10
 
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
     --no-install-recommends \
     openssl \
     ca-certificates \
     curl \
-    sqlite3 \
+    dumb-init \
    libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir /data
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -138,9 +139,10 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
+WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
+COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -148,6 +150,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -1,27 +1,35 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#   docker pull bitwardenrs/web-vault:v2.16.1
-#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.16.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303
-FROM bitwardenrs/web-vault@sha256:e40228f94cead5e50af6575fb39850a002dad146dab6836e5da5663e6d214303 as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull vaultwarden/web-vault:v2.21.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+#     [vaultwarden/web-vault:v2.21.1]
+#
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
 
 ########################## BUILD IMAGE ##########################
 FROM messense/rust-musl-cross:armv7-musleabihf as build
 
-# Alpine only works on SQlite
-ARG DB=sqlite
+# Alpine-based ARM (musl) only supports sqlite during compile time.
+# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
+ARG DB=sqlite,vendored_openssl
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -31,7 +39,7 @@ RUN rustup set profile minimal
 
 ENV USER "root"
 ENV RUSTFLAGS='-C link-arg=-s'
+ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -47,8 +55,8 @@ RUN rustup target add armv7-unknown-linux-musleabihf
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
+    && find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -60,29 +68,31 @@ RUN touch src/main.rs
 # Builds again, this time it'll just be
 # your actual source files being built
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
-RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
+# hadolint ignore=DL3059
+RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-alpine:3.12
+FROM balenalib/armv7hf-alpine:3.14
 
 ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10
 ENV SSL_CERT_DIR=/etc/ssl/certs
 
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
-# Install needed libraries
-RUN apk add --no-cache \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
    openssl \
     curl \
+    dumb-init \
     ca-certificates
-RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit
 
-RUN mkdir /data
-
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -91,9 +101,10 @@ EXPOSE 3012
 
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
+WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
+COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -101,6 +112,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
-WORKDIR /
-CMD ["catatonit", "/start.sh"]
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
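Because this Alpine/musl build now enables the `vendored_openssl` feature (the base image no longer ships OpenSSL), the produced binary should not depend on a shared libssl. A quick, illustrative check on the build-stage artifact, using the cargo target path from the diff above:

    # Illustrative check only; `file` is expected to report a musl/statically linked ARM executable.
    file target/armv7-unknown-linux-musleabihf/release/vaultwarden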
@@ -1,10 +1,20 @@
 #!/bin/sh
 
-if [ -r /etc/bitwarden_rs.sh ]; then
+if [ -r /etc/vaultwarden.sh ]; then
+    . /etc/vaultwarden.sh
+elif [ -r /etc/bitwarden_rs.sh ]; then
+    echo "### You are using the old /etc/bitwarden_rs.sh script, please migrate to /etc/vaultwarden.sh ###"
     . /etc/bitwarden_rs.sh
 fi
 
-if [ -d /etc/bitwarden_rs.d ]; then
+if [ -d /etc/vaultwarden.d ]; then
+    for f in /etc/vaultwarden.d/*.sh; do
+        if [ -r $f ]; then
+            . $f
+        fi
+    done
+elif [ -d /etc/bitwarden_rs.d ]; then
+    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
     for f in /etc/bitwarden_rs.d/*.sh; do
         if [ -r $f ]; then
             . $f
@@ -12,4 +22,4 @@ if [ -d /etc/bitwarden_rs.d ]; then
     done
 fi
 
-exec /bitwarden_rs "${@}"
+exec /vaultwarden "${@}"
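The rewritten start script prefers the new /etc/vaultwarden.sh and /etc/vaultwarden.d names and sources every readable *.sh drop-in before exec'ing /vaultwarden, so site-specific settings can live in small files. A minimal, hypothetical drop-in (the file name and values are illustrative; SMTP_HOST and SMTP_FROM are standard vaultwarden settings):

    # /etc/vaultwarden.d/10-smtp.sh -- sourced by /start.sh before /vaultwarden starts
    SMTP_HOST="smtp.example.com"          # illustrative value
    SMTP_FROM="vaultwarden@example.com"   # illustrative value
    export SMTP_HOST SMTP_FROM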
@@ -10,7 +10,7 @@ Docker Hub hooks provide these predefined [environment variables](https://docs.d
 * `DOCKER_TAG`: the Docker repository tag being built.
 * `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)
 
-The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/database/OS combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
+The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
 
 ## References
@@ -1,19 +1,16 @@
-# The default Debian-based images support these arches for all database connections
-#
-# Other images (Alpine-based) currently
-# support only a subset of these.
+# The default Debian-based images support these arches for all database backends.
 arches=(
     amd64
-    arm32v6
-    arm32v7
-    arm64v8
+    armv6
+    armv7
+    arm64
 )
 
 if [[ "${DOCKER_TAG}" == *alpine ]]; then
-    # The Alpine build currently only works for amd64.
-    os_suffix=.alpine
+    # The Alpine image build currently only works for certain arches.
+    distro_suffix=.alpine
     arches=(
         amd64
-        arm32v7
+        armv7
     )
 fi
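Since arches.sh keys everything off DOCKER_TAG, the renamed arch list and the new distro_suffix variable are easy to check in isolation; the tag value below is only an example:

    export DOCKER_TAG="1.22.0-alpine"   # hypothetical tag
    source ./hooks/arches.sh
    echo "${distro_suffix}"   # -> .alpine
    echo "${arches[@]}"       # -> amd64 armv7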
hooks/build
@@ -4,11 +4,42 @@ echo ">>> Building images..."
 
 source ./hooks/arches.sh
 
+if [[ -z "${SOURCE_COMMIT}" ]]; then
+    # This var is typically predefined by Docker Hub, but it won't be
+    # when testing locally.
+    SOURCE_COMMIT="$(git rev-parse HEAD)"
+fi
+
+# Construct a version string in the style of `build.rs`.
+GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
+if [[ -n "${GIT_EXACT_TAG}" ]]; then
+    SOURCE_VERSION="${GIT_EXACT_TAG}"
+else
+    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
+fi
+
+LABELS=(
+    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
+    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
+    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
+    org.opencontainers.image.licenses="GPL-3.0-only"
+    org.opencontainers.image.revision="${SOURCE_COMMIT}"
+    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
+    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
+    org.opencontainers.image.version="${SOURCE_VERSION}"
+)
+LABEL_ARGS=()
+for label in "${LABELS[@]}"; do
+    LABEL_ARGS+=(--label "${label}")
+done
+
 set -ex
 
 for arch in "${arches[@]}"; do
     docker build \
+        "${LABEL_ARGS[@]}" \
         -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-        -f docker/${arch}/Dockerfile${os_suffix} \
+        -f docker/${arch}/Dockerfile${distro_suffix} \
         .
 done
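After a local run of hooks/build, the OCI annotation labels added via LABEL_ARGS end up in the image config and can be read back with docker inspect; the repository and tag here are placeholders that would normally come from the Docker Hub environment:

    docker image inspect --format '{{json .Config.Labels}}' "${DOCKER_REPO}:${DOCKER_TAG}-amd64" | jq .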
hooks/pre_build (new executable file)
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -ex
+
+# If requested, print some environment info for troubleshooting.
+if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
+    id
+    pwd
+    df -h
+    env
+    docker info
+    docker version
+fi
+
+# Install build dependencies.
+deps=(
+    jq
+)
+apt-get update
+apt-get install -y "${deps[@]}"
+
+# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
+# Git operations that we perform later, so fetch the complete history and
+# tags first. Note that if the build is cached, the clone may have been
+# unshallowed already; if so, unshallowing will fail, so skip it.
+if [[ -f .git/shallow ]]; then
+    git fetch --unshallow --tags
+fi
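hooks/pre_build assumes the Docker Hub build environment (root shell, shallow git clone, predefined variables). A rough, illustrative sequence for exercising the hooks on a workstation, with placeholder values and the caveat that pre_build runs apt-get directly, so it expects to run as root:

    export DOCKER_REPO="index.docker.io/vaultwarden/server"   # placeholder
    export DOCKER_TAG="testing"                               # placeholder
    ./hooks/pre_build
    ./hooks/build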
hooks/push
@@ -1,117 +1,138 @@
 #!/bin/bash
 
-echo ">>> Pushing images..."
-
-export DOCKER_CLI_EXPERIMENTAL=enabled
-
-declare -A annotations=(
-    [amd64]="--os linux --arch amd64"
-    [arm32v6]="--os linux --arch arm --variant v6"
-    [arm32v7]="--os linux --arch arm --variant v7"
-    [arm64v8]="--os linux --arch arm64 --variant v8"
-)
-
 source ./hooks/arches.sh
 
+export DOCKER_CLI_EXPERIMENTAL=enabled
+
+# Join a list of args with a single char.
+# Ref: https://stackoverflow.com/a/17841619
+join() { local IFS="$1"; shift; echo "$*"; }
+
 set -ex
 
-declare -A images
+echo ">>> Starting local Docker registry..."
+
+# Docker Buildx's `docker-container` driver is needed for multi-platform
+# builds, but it can't access existing images on the Docker host (like the
+# cross-compiled ones we just built). Those images first need to be pushed to
+# a registry -- Docker Hub could be used, but since it's not trivial to clean
+# up those intermediate images on Docker Hub, it's easier to just run a local
+# Docker registry, which gets cleaned up automatically once the build job ends.
+#
+# https://docs.docker.com/registry/deploying/
+# https://hub.docker.com/_/registry
+#
+# Use host networking so the buildx container can access the registry via
+# localhost.
+#
+docker run -d --name registry --network host registry:2 # defaults to port 5000
+
+# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
+# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
+LOCAL_REGISTRY="localhost:5000"
+REPO="${DOCKER_REPO#*/}"
+LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
+
+echo ">>> Pushing images to local registry..."
+
 for arch in ${arches[@]}; do
-    images[$arch]="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
+    docker tag "${docker_image}" "${local_image}"
+    docker push "${local_image}"
 done
 
-# Push the images that were just built; manifest list creation fails if the
-# images (manifests) referenced don't already exist in the Docker registry.
-for image in "${images[@]}"; do
-    docker push "${image}"
-done
-
-manifest_lists=("${DOCKER_REPO}:${DOCKER_TAG}")
+echo ">>> Setting up Docker Buildx..."
+
+# Same as earlier, use host networking so the buildx container can access the
+# registry via localhost.
+#
+# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
+#
+docker buildx create --name builder --use --driver-opt network=host
+
+echo ">>> Running Docker Buildx..."
+
+tags=("${DOCKER_REPO}:${DOCKER_TAG}")
 
-# If the Docker tag starts with a version number, assume the latest release is
-# being pushed. Add an extra manifest (`latest` or `alpine`, as appropriate)
+# If the Docker tag starts with a version number, assume the latest release
+# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
 # to make it easier for users to track the latest release.
 if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
     if [[ "${DOCKER_TAG}" == *alpine ]]; then
-        manifest_lists+=(${DOCKER_REPO}:alpine)
+        tags+=(${DOCKER_REPO}:alpine)
     else
-        manifest_lists+=(${DOCKER_REPO}:latest)
-
-        # Add an extra `latest-arm32v6` tag; Docker can't seem to properly
-        # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
-        # (https://github.com/moby/moby/issues/41017).
-        #
-        # Add this tag only for the SQLite image, as the MySQL and PostgreSQL
-        # builds don't currently work on non-amd64 arches.
-        #
-        # TODO: Also add an `alpine-arm32v6` tag if multi-arch support for
-        # Alpine-based bitwarden_rs images is implemented before this Docker
-        # issue is fixed.
-        if [[ ${DOCKER_REPO} == *server ]]; then
-            docker tag "${DOCKER_REPO}:${DOCKER_TAG}-arm32v6" "${DOCKER_REPO}:latest-arm32v6"
-            docker push "${DOCKER_REPO}:latest-arm32v6"
-        fi
+        tags+=(${DOCKER_REPO}:latest)
     fi
 fi
 
-for manifest_list in "${manifest_lists[@]}"; do
-    # Create the (multi-arch) manifest list of arch-specific images.
-    docker manifest create ${manifest_list} ${images[@]}
-
-    # Make sure each image manifest is annotated with the correct arch info.
-    # Docker does not auto-detect the arch of each cross-compiled image, so
-    # everything would appear as `linux/amd64` otherwise.
-    for arch in "${arches[@]}"; do
-        docker manifest annotate ${annotations[$arch]} ${manifest_list} ${images[$arch]}
-    done
-
-    # Push the manifest list.
-    docker manifest push --purge ${manifest_list}
+tag_args=()
+for tag in "${tags[@]}"; do
+    tag_args+=(--tag "${tag}")
 done
 
-# Avoid logging credentials and tokens.
-set +ex
-
-# Delete the arch-specific tags, if credentials for doing so are available.
-# Note that `DOCKER_PASSWORD` must be the actual user password. Passing a JWT
-# obtained using a personal access token results in a 403 error with
-# {"detail": "access to the resource is forbidden with personal access token"}
-if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then
-    exit 0
-fi
-
-# Given a JSON input on stdin, extract the string value associated with the
-# specified key. This avoids an extra dependency on a tool like `jq`.
-extract() {
-    local key="$1"
-    # Extract "<key>":"<val>" (assumes key/val won't contain double quotes).
-    # The colon may have whitespace on either side.
-    grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]\+\"" |
-    # Extract just <val> by deleting the last '"', and then greedily deleting
-    # everything up to '"'.
-    sed -e 's/"$//' -e 's/.*"//'
-}
-
-echo ">>> Getting API token..."
-jwt=$(curl -sS -X POST \
-    -H "Content-Type: application/json" \
-    -d "{\"username\":\"${DOCKER_USERNAME}\",\"password\": \"${DOCKER_PASSWORD}\"}" \
-    "https://hub.docker.com/v2/users/login" |
-    extract 'token')
-
-# Strip the registry portion from `index.docker.io/user/repo`.
-repo="${DOCKER_REPO#*/}"
-
+# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
+# the arch list to a platform list (assuming the OS is always `linux`).
+declare -A arch_to_platform=(
+    [amd64]="linux/amd64"
+    [armv6]="linux/arm/v6"
+    [armv7]="linux/arm/v7"
+    [arm64]="linux/arm64"
+)
+platforms=()
 for arch in ${arches[@]}; do
-    # Don't delete the `arm32v6` tag; Docker can't seem to properly
-    # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
-    # (https://github.com/moby/moby/issues/41017).
-    if [[ ${arch} == 'arm32v6' ]]; then
-        continue
-    fi
-    tag="${DOCKER_TAG}-${arch}"
-    echo ">>> Deleting '${repo}:${tag}'..."
-    curl -sS -X DELETE \
-        -H "Authorization: Bearer ${jwt}" \
-        "https://hub.docker.com/v2/repositories/${repo}/tags/${tag}/"
+    platforms+=("${arch_to_platform[$arch]}")
 done
+platforms="$(join "," "${platforms[@]}")"
+
+# Run the build, pushing the resulting images and multi-arch manifest list to
+# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
+# context, which isn't needed here since the actual cross-compiled images
+# have already been built.
+docker buildx build \
+    --network host \
+    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
+    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
+    --platform "${platforms}" \
+    "${tag_args[@]}" \
+    --push \
+    - < ./docker/Dockerfile.buildx
+
+# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
+# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
+# (https://github.com/moby/moby/issues/41017).
+#
+# Note that we use `arm32v6` instead of `armv6` to be consistent with the
+# existing vaultwarden tags, which adhere to the naming conventions of the
+# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
+# Unfortunately, these per-arch repo names aren't always consistent with the
+# corresponding platform (OS/arch/variant) IDs, particularly in the case of
+# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
+#
+# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
+# so this step can be removed once fixed versions are in wider distribution.
+#
+# Tags:
+#
+# testing => testing-arm32v6
+# testing-alpine => <ignored>
+# x.y.z => x.y.z-arm32v6, latest-arm32v6
+# x.y.z-alpine => <ignored>
+#
+if [[ "${DOCKER_TAG}" != *alpine ]]; then
+    image="${DOCKER_REPO}":"${DOCKER_TAG}"
+
+    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
+    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
+    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"
+
+    # Pull the armv6 image by digest, retag it, and repush it.
+    docker pull "${DOCKER_REPO}"@"${digest}"
+    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
+    docker push "${image}"-arm32v6
+
+    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
+        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
+        docker push "${DOCKER_REPO}:latest"-arm32v6
+    fi
+fi
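Once hooks/push has completed, the multi-arch manifest list on Docker Hub can be inspected to confirm that all four platforms are present; the tag below is a placeholder and jq is the same dependency installed by hooks/pre_build:

    docker manifest inspect vaultwarden/server:testing | jq '.manifests[].platform'
    # expected platforms: linux/amd64, linux/arm/v6, linux/arm/v7, linux/arm64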
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

migrations/mysql/2021-03-11-190243_add_sends/down.sql (new file)
@@ -0,0 +1 @@
+DROP TABLE sends;

migrations/mysql/2021-03-11-190243_add_sends/up.sql (new file)
@@ -0,0 +1,25 @@
+CREATE TABLE sends (
+    uuid CHAR(36) NOT NULL PRIMARY KEY,
+    user_uuid CHAR(36) REFERENCES users (uuid),
+    organization_uuid CHAR(36) REFERENCES organizations (uuid),
+
+    name TEXT NOT NULL,
+    notes TEXT,
+
+    atype INTEGER NOT NULL,
+    data TEXT NOT NULL,
+    akey TEXT NOT NULL,
+    password_hash BLOB,
+    password_salt BLOB,
+    password_iter INTEGER,
+
+    max_access_count INTEGER,
+    access_count INTEGER NOT NULL,
+
+    creation_date DATETIME NOT NULL,
+    revision_date DATETIME NOT NULL,
+    expiration_date DATETIME,
+    deletion_date DATETIME NOT NULL,
+
+    disabled BOOLEAN NOT NULL
+);

migrations/mysql/2021-04-30-233251_add_reprompt/up.sql (new file)
@@ -0,0 +1,2 @@
+ALTER TABLE ciphers
+    ADD COLUMN reprompt INTEGER;

migrations/mysql/2021-05-11-205202_add_hide_email/up.sql (new file)
@@ -0,0 +1,2 @@
+ALTER TABLE sends
+    ADD COLUMN hide_email BOOLEAN;

@@ -0,0 +1,5 @@
+ALTER TABLE organizations
+    ADD COLUMN private_key TEXT;
+
+ALTER TABLE organizations
+    ADD COLUMN public_key TEXT;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -0,0 +1 @@
+DROP TABLE sends;

migrations/postgresql/2021-03-11-190243_add_sends/up.sql (new file)
@@ -0,0 +1,25 @@
+CREATE TABLE sends (
+    uuid CHAR(36) NOT NULL PRIMARY KEY,
+    user_uuid CHAR(36) REFERENCES users (uuid),
+    organization_uuid CHAR(36) REFERENCES organizations (uuid),
+
+    name TEXT NOT NULL,
+    notes TEXT,
+
+    atype INTEGER NOT NULL,
+    data TEXT NOT NULL,
+    key TEXT NOT NULL,
+    password_hash BYTEA,
+    password_salt BYTEA,
+    password_iter INTEGER,
+
+    max_access_count INTEGER,
+    access_count INTEGER NOT NULL,
+
+    creation_date TIMESTAMP NOT NULL,
+    revision_date TIMESTAMP NOT NULL,
+    expiration_date TIMESTAMP,
+    deletion_date TIMESTAMP NOT NULL,
+
+    disabled BOOLEAN NOT NULL
+);

@@ -0,0 +1 @@
+ALTER TABLE sends RENAME COLUMN key TO akey;
@@ -0,0 +1,2 @@
+ALTER TABLE ciphers
+    ADD COLUMN reprompt INTEGER;
@@ -0,0 +1,2 @@
+ALTER TABLE sends
+    ADD COLUMN hide_email BOOLEAN;
@@ -0,0 +1,5 @@
+ALTER TABLE organizations
+    ADD COLUMN private_key TEXT;
+
+ALTER TABLE organizations
+    ADD COLUMN public_key TEXT;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

migrations/sqlite/2021-03-11-190243_add_sends/down.sql (new file)
@@ -0,0 +1 @@
+DROP TABLE sends;

migrations/sqlite/2021-03-11-190243_add_sends/up.sql (new file)
@@ -0,0 +1,25 @@
+CREATE TABLE sends (
+    uuid TEXT NOT NULL PRIMARY KEY,
+    user_uuid TEXT REFERENCES users (uuid),
+    organization_uuid TEXT REFERENCES organizations (uuid),
+
+    name TEXT NOT NULL,
+    notes TEXT,
+
+    atype INTEGER NOT NULL,
+    data TEXT NOT NULL,
+    key TEXT NOT NULL,
+    password_hash BLOB,
+    password_salt BLOB,
+    password_iter INTEGER,
+
+    max_access_count INTEGER,
+    access_count INTEGER NOT NULL,
+
+    creation_date DATETIME NOT NULL,
+    revision_date DATETIME NOT NULL,
+    expiration_date DATETIME,
+    deletion_date DATETIME NOT NULL,
+
+    disabled BOOLEAN NOT NULL
+);

@@ -0,0 +1 @@
+ALTER TABLE sends RENAME COLUMN key TO akey;

migrations/sqlite/2021-04-30-233251_add_reprompt/up.sql (new file)
@@ -0,0 +1,2 @@
+ALTER TABLE ciphers
+    ADD COLUMN reprompt INTEGER;
@@ -0,0 +1,2 @@
+ALTER TABLE sends
+    ADD COLUMN hide_email BOOLEAN;
@@ -0,0 +1,5 @@
+ALTER TABLE organizations
+    ADD COLUMN private_key TEXT;
+
+ALTER TABLE organizations
+    ADD COLUMN public_key TEXT;
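vaultwarden applies these Diesel migrations automatically at startup, so running one by hand is only useful for inspecting the schema it produces; a throwaway SQLite example using the new sends migration above (the database path is scratch):

    sqlite3 /tmp/vw-scratch.db < migrations/sqlite/2021-03-11-190243_add_sends/up.sql
    sqlite3 /tmp/vw-scratch.db '.schema sends'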
resources/vaultwarden-icon-white.svg (new file, 99 lines, 8.7 KiB; diff suppressed because one or more lines are too long)
resources/vaultwarden-icon.svg (new file, 86 lines, 8.4 KiB; diff suppressed because one or more lines are too long)
resources/vaultwarden-logo-white.svg (new file, 271 lines, 17 KiB; diff suppressed because one or more lines are too long)
resources/vaultwarden-logo.svg (new file, 151 lines, 12 KiB; diff suppressed because one or more lines are too long)
@@ -1 +1 @@
-nightly-2020-07-11
+nightly-2021-06-24

@@ -1,2 +1,7 @@
 version = "Two"
+edition = "2018"
 max_width = 120
+newline_style = "Unix"
+use_small_heuristics = "Off"
+struct_lit_single_line = false
+overflow_delimited_expr = true
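With the toolchain pinned to nightly-2021-06-24 and the extra rustfmt options above, a formatting check would typically be run against that same pinned nightly, for example:

    rustup component add rustfmt --toolchain nightly-2021-06-24
    cargo +nightly-2021-06-24 fmt -- --check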
src/api/admin.rs
@@ -1,10 +1,10 @@
 use once_cell::sync::Lazy;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::process::Command;
+use std::{env, time::Duration};
 
 use rocket::{
-    http::{Cookie, Cookies, SameSite},
+    http::{Cookie, Cookies, SameSite, Status},
     request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
     response::{content::Html, Flash, Redirect},
     Route,
@@ -12,13 +12,13 @@ use rocket::{
 use rocket_contrib::json::Json;
 
 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult},
+    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     config::ConfigBuilder,
-    db::{backup_database, models::*, DbConn, DbConnType},
+    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     error::{Error, MapResult},
     mail,
-    util::get_display_size,
+    util::{format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker},
     CONFIG,
 };
 
@@ -30,13 +30,17 @@ pub fn routes() -> Vec<Route> {
     routes![
         admin_login,
         get_users_json,
+        get_user_json,
         post_admin_login,
         admin_page,
         invite_user,
         logout,
         delete_user,
         deauth_user,
+        disable_user,
+        enable_user,
         remove_2fa,
+        update_user_org_type,
         update_revision_users,
         post_config,
         delete_config,
@@ -44,17 +48,25 @@ pub fn routes() -> Vec<Route> {
         test_smtp,
         users_overview,
         organizations_overview,
+        delete_organization,
         diagnostics,
+        get_diagnostics_config
     ]
 }
 
-static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
+static DB_TYPE: Lazy<&str> = Lazy::new(|| {
     DbConnType::from_url(&CONFIG.database_url())
-        .map(|t| t == DbConnType::sqlite)
-        .unwrap_or(false)
-        && Command::new("sqlite3").arg("-version").status().is_ok()
+        .map(|t| match t {
+            DbConnType::sqlite => "SQLite",
+            DbConnType::mysql => "MySQL",
+            DbConnType::postgresql => "PostgreSQL",
+        })
+        .unwrap_or("Unknown")
 });
 
+static CAN_BACKUP: Lazy<bool> =
+    Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));
+
 #[get("/")]
 fn admin_disabled() -> &'static str {
     "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
@@ -80,6 +92,27 @@ impl<'a, 'r> FromRequest<'a, 'r> for Referer {
     }
 }
 
+#[derive(Debug)]
+struct IpHeader(Option<String>);
+
+impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
+    type Error = ();
+
+    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        if req.headers().get_one(&CONFIG.ip_header()).is_some() {
+            Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
+        } else if req.headers().get_one("X-Client-IP").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
+        } else if req.headers().get_one("X-Real-IP").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
+        } else if req.headers().get_one("X-Forwarded-For").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
+        } else {
+            Outcome::Success(IpHeader(None))
+        }
+    }
+}
+
 /// Used for `Location` response headers, which must specify an absolute URI
 /// (see https://tools.ietf.org/html/rfc2616#section-14.30).
 fn admin_url(referer: Referer) -> String {
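The new IpHeader guard only records which client-IP header is present on a request, preferring the configured IP_HEADER value and then the common reverse-proxy headers; it does not affect authentication. A rough way to observe the fallback order against a local instance (URL, port and header value are purely illustrative, and the endpoint that surfaces the detected header is not shown in this diff):

    # Illustrative request; with no X-Client-IP set, X-Real-IP would be the header the guard reports.
    curl -s -o /dev/null -H "X-Real-IP: 203.0.113.7" http://localhost:8080/admin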
@@ -105,7 +138,12 @@ fn admin_url(referer: Referer) -> String {
 fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
     // If there is an error, show it
     let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
-    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg, "urlpath": CONFIG.domain_path()});
+    let json = json!({
+        "page_content": "admin/login",
+        "version": VERSION,
+        "error": msg,
+        "urlpath": CONFIG.domain_path()
+    });
 
     // Return the page
     let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -129,10 +167,7 @@ fn post_admin_login(
     // If the token is invalid, redirect to login page
     if !_validate_token(&data.token) {
         error!("Invalid admin token. IP: {}", ip.ip);
-        Err(Flash::error(
-            Redirect::to(admin_url(referer)),
-            "Invalid admin token, please try again.",
-        ))
+        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
     } else {
         // If the token received is valid, generate JWT and save it as a cookie
         let claims = generate_admin_claims();
@@ -161,9 +196,7 @@ fn _validate_token(token: &str) -> bool {
 struct AdminTemplateData {
     page_content: String,
     version: Option<&'static str>,
-    users: Option<Vec<Value>>,
-    organizations: Option<Vec<Value>>,
-    diagnostics: Option<Value>,
+    page_data: Option<Value>,
     config: Value,
     can_backup: bool,
     logged_in: bool,
@@ -179,51 +212,19 @@ impl AdminTemplateData {
             can_backup: *CAN_BACKUP,
             logged_in: true,
             urlpath: CONFIG.domain_path(),
-            users: None,
-            organizations: None,
-            diagnostics: None,
+            page_data: None,
         }
     }
 
-    fn users(users: Vec<Value>) -> Self {
+    fn with_data(page_content: &str, page_data: Value) -> Self {
         Self {
-            page_content: String::from("admin/users"),
+            page_content: String::from(page_content),
             version: VERSION,
-            users: Some(users),
+            page_data: Some(page_data),
             config: CONFIG.prepare_json(),
             can_backup: *CAN_BACKUP,
             logged_in: true,
             urlpath: CONFIG.domain_path(),
-            organizations: None,
-            diagnostics: None,
-        }
-    }
-
-    fn organizations(organizations: Vec<Value>) -> Self {
-        Self {
-            page_content: String::from("admin/organizations"),
-            version: VERSION,
-            organizations: Some(organizations),
-            config: CONFIG.prepare_json(),
-            can_backup: *CAN_BACKUP,
-            logged_in: true,
-            urlpath: CONFIG.domain_path(),
-            users: None,
-            diagnostics: None,
-        }
-    }
-
-    fn diagnostics(diagnostics: Value) -> Self {
-        Self {
-            page_content: String::from("admin/diagnostics"),
-            version: VERSION,
-            organizations: None,
-            config: CONFIG.prepare_json(),
-            can_backup: *CAN_BACKUP,
-            logged_in: true,
-            urlpath: CONFIG.domain_path(),
-            users: None,
-            diagnostics: Some(diagnostics),
         }
     }
 
@@ -244,23 +245,39 @@ struct InviteData {
     email: String,
 }
 
+fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
+    if let Some(user) = User::find_by_uuid(uuid, conn) {
+        Ok(user)
+    } else {
+        err_code!("User doesn't exist", Status::NotFound.code);
+    }
+}
+
 #[post("/invite", data = "<data>")]
-fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
+fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
     let email = data.email.clone();
     if User::find_by_mail(&data.email, &conn).is_some() {
-        err!("User already exists")
+        err_code!("User already exists", Status::Conflict.code)
     }
 
     let mut user = User::new(email);
-    user.save(&conn)?;
 
-    if CONFIG.mail_enabled() {
-        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)
-    } else {
-        let invitation = Invitation::new(data.email);
-        invitation.save(&conn)
-    }
+    // TODO: After try_blocks is stabilized, this can be made more readable
+    // See: https://github.com/rust-lang/rust/issues/31436
+    (|| {
+        if CONFIG.mail_enabled() {
+            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?;
+        } else {
+            let invitation = Invitation::new(data.email);
+            invitation.save(&conn)?;
+        }
+
+        user.save(&conn)
+    })()
+    .map_err(|e| e.with_code(Status::InternalServerError.code))?;
+
+    Ok(Json(user.to_json(&conn)))
 }
 
 #[post("/test/smtp", data = "<data>")]
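The rewritten `invite_user` above wraps its fallible steps in an immediately-invoked closure so `?` has something local to return from, then maps the accumulated error once; this is the usual stand-in for the unstable `try` blocks mentioned in the TODO. A minimal sketch of the same pattern with made-up steps and a plain `String` error:

fn step_one(ok: bool) -> Result<u32, String> {
    if ok { Ok(1) } else { Err("step one failed".into()) }
}

fn step_two(v: u32) -> Result<u32, String> {
    Ok(v + 1)
}

fn run(ok: bool) -> Result<u32, String> {
    // `?` inside the closure short-circuits the closure, not `run`,
    // so the combined Result can be post-processed in one place.
    (|| {
        let a = step_one(ok)?;
        step_two(a)
    })()
    .map_err(|e| format!("pipeline failed: {e}"))
}

fn main() {
    assert_eq!(run(true), Ok(2));
    assert!(run(false).is_err());
}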
@@ -275,58 +292,126 @@ fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
 }
 
 #[get("/logout")]
-fn logout(mut cookies: Cookies, referer: Referer) -> Result<Redirect, ()> {
+fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
     cookies.remove(Cookie::named(COOKIE_NAME));
-    Ok(Redirect::to(admin_url(referer)))
+    Redirect::to(admin_url(referer))
 }
 
 #[get("/users")]
-fn get_users_json(_token: AdminToken, conn: DbConn) -> JsonResult {
+fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
     let users = User::get_all(&conn);
     let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
 
-    Ok(Json(Value::Array(users_json)))
+    Json(Value::Array(users_json))
 }
 
 #[get("/users/overview")]
 fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
     let users = User::get_all(&conn);
-    let users_json: Vec<Value> = users.iter()
+    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
+    let users_json: Vec<Value> = users
+        .iter()
         .map(|u| {
             let mut usr = u.to_json(&conn);
             usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
             usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
             usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
+            usr["user_enabled"] = json!(u.enabled);
+            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
+            usr["last_active"] = match u.last_active(&conn) {
+                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
+                None => json!("Never"),
+            };
             usr
-        }).collect();
+        })
+        .collect();
 
-    let text = AdminTemplateData::users(users_json).render()?;
+    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
     Ok(Html(text))
 }
 
+#[get("/users/<uuid>")]
+fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
+    let user = get_user_or_404(&uuid, &conn)?;
+
+    Ok(Json(user.to_json(&conn)))
+}
+
 #[post("/users/<uuid>/delete")]
 fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    let user = get_user_or_404(&uuid, &conn)?;
     user.delete(&conn)
 }
 
 #[post("/users/<uuid>/deauth")]
 fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    let mut user = get_user_or_404(&uuid, &conn)?;
     Device::delete_all_by_user(&user.uuid, &conn)?;
     user.reset_security_stamp();
 
     user.save(&conn)
 }
 
+#[post("/users/<uuid>/disable")]
+fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&uuid, &conn)?;
+    Device::delete_all_by_user(&user.uuid, &conn)?;
+    user.reset_security_stamp();
+    user.enabled = false;
+
+    user.save(&conn)
+}
+
+#[post("/users/<uuid>/enable")]
+fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&uuid, &conn)?;
+    user.enabled = true;
+
+    user.save(&conn)
+}
+
 #[post("/users/<uuid>/remove-2fa")]
 fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    let mut user = get_user_or_404(&uuid, &conn)?;
     TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
     user.totp_recover = None;
     user.save(&conn)
 }
 
+#[derive(Deserialize, Debug)]
+struct UserOrgTypeData {
+    user_type: NumberOrString,
+    user_uuid: String,
+    org_uuid: String,
+}
+
+#[post("/users/org_type", data = "<data>")]
+fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let data: UserOrgTypeData = data.into_inner();
+
+    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
+        Some(user) => user,
+        None => err!("The specified user isn't member of the organization"),
+    };
+
+    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
+        Some(new_type) => new_type as i32,
+        None => err!("Invalid type"),
+    };
+
+    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
+        // Removing owner permmission, check that there are at least another owner
+        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
+
+        if num_owners <= 1 {
+            err!("Can't change the type of the last owner")
+        }
+    }
+
+    user_to_edit.atype = new_type as i32;
+    user_to_edit.save(&conn)
+}
+
 #[post("/users/update_revision")]
 fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
     User::update_all_revisions(&conn)
@@ -335,19 +420,28 @@ fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
 #[get("/organizations/overview")]
 fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
     let organizations = Organization::get_all(&conn);
-    let organizations_json: Vec<Value> = organizations.iter().map(|o| {
+    let organizations_json: Vec<Value> = organizations
+        .iter()
+        .map(|o| {
             let mut org = o.to_json();
             org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
             org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
             org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
             org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
             org
-        }).collect();
+        })
+        .collect();
 
-    let text = AdminTemplateData::organizations(organizations_json).render()?;
+    let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
     Ok(Html(text))
 }
 
+#[post("/organizations/<uuid>/delete")]
+fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
+    org.delete(&conn)
+}
+
 #[derive(Deserialize)]
 struct WebVaultVersion {
     version: String,
@@ -364,77 +458,121 @@ struct GitCommit {
 }
 
 fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    use reqwest::{blocking::Client, header::USER_AGENT};
-    use std::time::Duration;
-    let github_api = Client::builder().build()?;
-
-    Ok(
-        github_api.get(url)
-            .timeout(Duration::from_secs(10))
-            .header(USER_AGENT, "Bitwarden_RS")
-            .send()?
-            .error_for_status()?
-            .json::<T>()?
-    )
+    let github_api = get_reqwest_client();
+
+    Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
+}
+
+fn has_http_access() -> bool {
+    let http_access = get_reqwest_client();
+
+    match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
+        Ok(r) => r.status().is_success(),
+        _ => false,
+    }
 }
 
 #[get("/diagnostics")]
-fn diagnostics(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
-    use std::net::ToSocketAddrs;
-    use chrono::prelude::*;
+fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
     use crate::util::read_file_string;
+    use chrono::prelude::*;
+    use std::net::ToSocketAddrs;
 
-    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
-    let vault_version_str = read_file_string(&vault_version_path)?;
-    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
+    // Get current running versions
+    let web_vault_version: WebVaultVersion =
+        match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
+            Ok(s) => serde_json::from_str(&s)?,
+            _ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
+                Ok(s) => serde_json::from_str(&s)?,
+                _ => WebVaultVersion {
+                    version: String::from("Version file missing"),
+                },
+            },
+        };
 
-    let github_ips = ("github.com", 0).to_socket_addrs().map(|mut i| i.next());
-    let (dns_resolved, dns_ok) = match github_ips {
-        Ok(Some(a)) => (a.ip().to_string(), true),
-        _ => ("Could not resolve domain name.".to_string(), false),
+    // Execute some environment checks
+    let running_within_docker = is_running_in_docker();
+    let has_http_access = has_http_access();
+    let uses_proxy = env::var_os("HTTP_PROXY").is_some()
+        || env::var_os("http_proxy").is_some()
+        || env::var_os("HTTPS_PROXY").is_some()
+        || env::var_os("https_proxy").is_some();
+
+    // Check if we are able to resolve DNS entries
+    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
+        Ok(Some(a)) => a.ip().to_string(),
+        _ => "Could not resolve domain name.".to_string(),
     };
 
-    // If the DNS Check failed, do not even attempt to check for new versions since we were not able to resolve github.com
-    let (latest_release, latest_commit, latest_web_build) = if dns_ok {
+    // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
+    // TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already.
+    let (latest_release, latest_commit, latest_web_build) = if has_http_access {
         (
-            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
+            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
                 Ok(r) => r.tag_name,
-                _ => "-".to_string()
+                _ => "-".to_string(),
             },
-            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
+            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
                 Ok(mut c) => {
                     c.sha.truncate(8);
                     c.sha
-                },
-                _ => "-".to_string()
-            },
-            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
-                Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
-                _ => "-".to_string()
+                }
+                _ => "-".to_string(),
+            },
+            // Do not fetch the web-vault version when running within Docker.
+            // The web-vault version is embedded within the container it self, and should not be updated manually
+            if running_within_docker {
+                "-".to_string()
+            } else {
+                match get_github_api::<GitRelease>(
+                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
+                ) {
+                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
+                    _ => "-".to_string(),
+                }
             },
         )
     } else {
         ("-".to_string(), "-".to_string(), "-".to_string())
     };
 
-    // Run the date check as the last item right before filling the json.
-    // This should ensure that the time difference between the browser and the server is as minimal as possible.
-    let dt = Utc::now();
-    let server_time = dt.format("%Y-%m-%d %H:%M:%S").to_string();
+    let ip_header_name = match &ip_header.0 {
+        Some(h) => h,
+        _ => "",
+    };
 
     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
-        "server_time": server_time,
-        "web_vault_version": web_vault_version.version,
         "latest_release": latest_release,
         "latest_commit": latest_commit,
+        "web_vault_enabled": &CONFIG.web_vault_enabled(),
+        "web_vault_version": web_vault_version.version,
         "latest_web_build": latest_web_build,
+        "running_within_docker": running_within_docker,
+        "has_http_access": has_http_access,
+        "ip_header_exists": &ip_header.0.is_some(),
+        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_name": ip_header_name,
+        "ip_header_config": &CONFIG.ip_header(),
+        "uses_proxy": uses_proxy,
+        "db_type": *DB_TYPE,
+        "db_version": get_sql_server_version(&conn),
+        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
+        "overrides": &CONFIG.get_overrides().join(", "),
+        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
+        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
     });
 
-    let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
+    let text = AdminTemplateData::with_data("admin/diagnostics", diagnostics_json).render()?;
     Ok(Html(text))
 }
 
+#[get("/diagnostics/config")]
+fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
+    let support_json = CONFIG.get_support_json();
+    Json(support_json)
+}
+
 #[post("/config", data = "<data>")]
 fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
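On the TODO about caching the GitHub lookups: the unauthenticated API allows 60 requests per hour and the diagnostics page issues three per view. One possible shape for such a cache, sketched with a plain `Mutex` and a time-to-live; the fetch closure stands in for `get_github_api` and nothing here is taken from the diff:

use std::sync::Mutex;
use std::time::{Duration, Instant};

struct TimedCache {
    ttl: Duration,
    slot: Mutex<Option<(Instant, String)>>,
}

impl TimedCache {
    fn new(ttl: Duration) -> Self {
        Self { ttl, slot: Mutex::new(None) }
    }

    /// Returns the cached value while it is fresh; otherwise calls `fetch`
    /// and stores the result. A failed fetch falls back to a placeholder.
    fn get_or_fetch<F: FnOnce() -> Option<String>>(&self, fetch: F) -> String {
        let mut slot = self.slot.lock().unwrap();
        if let Some((at, value)) = slot.as_ref() {
            if at.elapsed() < self.ttl {
                return value.clone();
            }
        }
        let value = fetch().unwrap_or_else(|| "-".to_string());
        *slot = Some((Instant::now(), value.clone()));
        value
    }
}

fn main() {
    let cache = TimedCache::new(Duration::from_secs(3600));
    // First call fetches; the second is served from the cache.
    let a = cache.get_or_fetch(|| Some("1.20.0".to_string()));
    let b = cache.get_or_fetch(|| panic!("should not be called again"));
    assert_eq!(a, b);
}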
@@ -447,11 +585,11 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
 }
 
 #[post("/config/backup_db")]
-fn backup_db(_token: AdminToken) -> EmptyResult {
+fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
     if *CAN_BACKUP {
-        backup_database()
+        backup_database(&conn)
     } else {
-        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
+        err!("Can't back up current DB (Only SQLite supports this feature)");
     }
 }
 
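For background on the SQLite-only backup above: since SQLite 3.27 a consistent copy of a live database can be produced with `VACUUM INTO`. The snippet below is a generic illustration using the `rusqlite` crate, not the `backup_database` implementation behind this diff (vaultwarden goes through its own database layer):

use rusqlite::Connection;

/// Writes a consistent snapshot of the open database to `dest_path`
/// using SQLite's `VACUUM INTO` (available since SQLite 3.27).
fn backup_sqlite(conn: &Connection, dest_path: &str) -> rusqlite::Result<()> {
    // The destination path is assumed to be trusted here; a real
    // implementation would escape or validate it.
    conn.execute_batch(&format!("VACUUM INTO '{}'", dest_path))?;
    Ok(())
}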
@@ -1,5 +1,6 @@
 use chrono::Utc;
 use rocket_contrib::json::Json;
+use serde_json::Value;
 
 use crate::{
     api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
@@ -94,7 +95,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         }
         None => {
             // Order is important here; the invitation check must come first
-            // because the bitwarden_rs admin can invite anyone, regardless
+            // because the vaultwarden admin can invite anyone, regardless
            // of other signup restrictions.
             if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
                 User::new(data.Email.clone())
@@ -115,7 +116,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         user.client_kdf_type = client_kdf_type;
     }
 
-    user.set_password(&data.MasterPasswordHash);
+    user.set_password(&data.MasterPasswordHash, None);
     user.akey = data.Key;
 
     // Add extra fields if present
@@ -139,10 +140,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
             }
 
             user.last_verifying_at = Some(user.created_at);
-        } else {
-            if let Err(e) = mail::send_welcome(&user.email) {
-                error!("Error sending welcome email: {:#?}", e);
-            }
+        } else if let Err(e) = mail::send_welcome(&user.email) {
+            error!("Error sending welcome email: {:#?}", e);
         }
     }
 
@@ -150,8 +149,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
 }
 
 #[get("/accounts/profile")]
-fn profile(headers: Headers, conn: DbConn) -> JsonResult {
-    Ok(Json(headers.user.to_json(&conn)))
+fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
+    Json(headers.user.to_json(&conn))
 }
 
 #[derive(Deserialize, Debug)]
@@ -232,7 +231,10 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
         err!("Invalid password")
     }
 
-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(
+        &data.NewMasterPasswordHash,
+        Some(vec![String::from("post_rotatekey"), String::from("get_contacts")]),
+    );
     user.akey = data.Key;
     user.save(&conn)
 }
@@ -259,7 +261,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
 
     user.client_kdf_iter = data.KdfIterations;
     user.client_kdf_type = data.Kdf;
-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;
     user.save(&conn)
 }
@@ -321,15 +323,9 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
             err!("The cipher is not owned by the user")
         }
 
-        update_cipher_from_data(
-            &mut saved_cipher,
-            cipher_data,
-            &headers,
-            false,
-            &conn,
-            &nt,
-            UpdateType::CipherUpdate,
-        )?
+        // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
+        // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
+        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)?
     }
 
     // Update user data
@@ -445,7 +441,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.email_new = None;
     user.email_new_token = None;
 
-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;
 
     user.save(&conn)
@@ -460,7 +456,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
     }
 
     if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
-        error!("Error sending delete account email: {:#?}", e);
+        error!("Error sending verify_email email: {:#?}", e);
     }
 
     Ok(())
@@ -584,24 +580,45 @@ struct PasswordHintData {
 
 #[post("/accounts/password-hint", data = "<data>")]
 fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
-    let data: PasswordHintData = data.into_inner().data;
-
-    let hint = match User::find_by_mail(&data.Email, &conn) {
-        Some(user) => user.password_hint,
-        None => return Ok(()),
-    };
-
-    if CONFIG.mail_enabled() {
-        mail::send_password_hint(&data.Email, hint)?;
-    } else if CONFIG.show_password_hint() {
-        if let Some(hint) = hint {
-            err!(format!("Your password hint is: {}", &hint));
-        } else {
-            err!("Sorry, you have no password hint...");
-        }
+    if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
+        err!("This server is not configured to provide password hints.");
     }
 
-    Ok(())
+    const NO_HINT: &str = "Sorry, you have no password hint...";
+
+    let data: PasswordHintData = data.into_inner().data;
+    let email = &data.Email;
+
+    match User::find_by_mail(email, &conn) {
+        None => {
+            // To prevent user enumeration, act as if the user exists.
+            if CONFIG.mail_enabled() {
+                // There is still a timing side channel here in that the code
+                // paths that send mail take noticeably longer than ones that
+                // don't. Add a randomized sleep to mitigate this somewhat.
+                use rand::{thread_rng, Rng};
+                let mut rng = thread_rng();
+                let base = 1000;
+                let delta: i32 = 100;
+                let sleep_ms = (base + rng.gen_range(-delta..=delta)) as u64;
+                std::thread::sleep(std::time::Duration::from_millis(sleep_ms));
+                Ok(())
+            } else {
+                err!(NO_HINT);
+            }
+        }
+        Some(user) => {
+            let hint: Option<String> = user.password_hint;
+            if CONFIG.mail_enabled() {
+                mail::send_password_hint(email, hint)?;
+                Ok(())
+            } else if let Some(hint) = hint {
+                err!(format!("Your password hint is: {}", hint));
+            } else {
+                err!(NO_HINT);
+            }
+        }
+    }
 }
 
 #[derive(Deserialize)]
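The no-match branch of `password_hint` above hides user enumeration behind a randomized sleep, as its inline comments explain. Just that mitigation, extracted into a runnable sketch (the 1000 ms base and the ±100 ms jitter mirror the values in the diff):

use rand::{thread_rng, Rng};
use std::{thread, time::Duration};

/// Sleeps for roughly `base_ms` milliseconds, jittered by +/- `delta_ms`,
/// so the "user not found" path takes about as long as sending mail.
fn randomized_sleep(base_ms: i32, delta_ms: i32) {
    let mut rng = thread_rng();
    let sleep_ms = (base_ms + rng.gen_range(-delta_ms..=delta_ms)) as u64;
    thread::sleep(Duration::from_millis(sleep_ms));
}

fn main() {
    randomized_sleep(1000, 100);
}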
@@ -611,7 +628,7 @@ struct PreloginData {
 }
 
 #[post("/accounts/prelogin", data = "<data>")]
-fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
+fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
     let data: PreloginData = data.into_inner().data;
 
     let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
@@ -619,10 +636,10 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
         None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
     };
 
-    Ok(Json(json!({
+    Json(json!({
         "Kdf": kdf_type,
         "KdfIterations": kdf_iter
-    })))
+    }))
 }
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
@@ -1,22 +1,32 @@
 use std::collections::{HashMap, HashSet};
-use std::path::Path;
+use std::path::{Path, PathBuf};
 
+use chrono::{NaiveDateTime, Utc};
 use rocket::{http::ContentType, request::Form, Data, Route};
 use rocket_contrib::json::Json;
 use serde_json::Value;
 
-use data_encoding::HEXLOWER;
 use multipart::server::{save::SavedData, Multipart, SaveResult};
 
 use crate::{
     api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
     auth::Headers,
     crypto,
-    db::{models::*, DbConn},
+    db::{models::*, DbConn, DbPool},
     CONFIG,
 };
 
 pub fn routes() -> Vec<Route> {
+    // Note that many routes have an `admin` variant; this seems to be
+    // because the stored procedure that upstream Bitwarden uses to determine
+    // whether the user can edit a cipher doesn't take into account whether
+    // the user is an org owner/admin. The `admin` variant first checks
+    // whether the user is an owner/admin of the relevant org, and if so,
+    // allows the operation unconditionally.
+    //
+    // vaultwarden factors in the org owner/admin status as part of
+    // determining the write accessibility of a cipher, so most
+    // admin/non-admin implementations can be shared.
     routes![
         sync,
         get_ciphers,
@@ -28,8 +38,11 @@ pub fn routes() -> Vec<Route> {
         post_ciphers_admin,
         post_ciphers_create,
         post_ciphers_import,
-        post_attachment,
-        post_attachment_admin,
+        get_attachment,
+        post_attachment_v2,
+        post_attachment_v2_data,
+        post_attachment, // legacy
+        post_attachment_admin, // legacy
         post_attachment_share,
         delete_attachment_post,
         delete_attachment_post_admin,
@@ -38,7 +51,7 @@ pub fn routes() -> Vec<Route> {
         post_cipher_admin,
         post_cipher_share,
         put_cipher_share,
-        put_cipher_share_seleted,
+        put_cipher_share_selected,
         post_cipher,
         put_cipher,
         delete_cipher_post,
@@ -50,6 +63,9 @@ pub fn routes() -> Vec<Route> {
         delete_cipher_selected,
         delete_cipher_selected_post,
         delete_cipher_selected_put,
+        delete_cipher_selected_admin,
+        delete_cipher_selected_post_admin,
+        delete_cipher_selected_put_admin,
         restore_cipher_put,
         restore_cipher_put_admin,
         restore_cipher_selected,
@@ -63,6 +79,15 @@ pub fn routes() -> Vec<Route> {
     ]
 }
 
+pub fn purge_trashed_ciphers(pool: DbPool) {
+    debug!("Purging trashed ciphers");
+    if let Ok(conn) = pool.get() {
+        Cipher::purge_trash(&conn);
+    } else {
+        error!("Failed to get DB connection while purging trashed ciphers")
+    }
+}
+
 #[derive(FromForm, Default)]
 struct SyncData {
     #[form(field = "excludeDomains")]
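`purge_trashed_ciphers` above performs a single purge pass; how often it runs is decided elsewhere in the codebase and is not part of this diff. A hedged sketch of one way such a pass could be driven from a background thread, with the pool and the purge stubbed out (all names here are illustrative):

use std::{thread, time::Duration};

// Stand-ins for the real DbPool and purge pass.
struct DbPool;
impl DbPool {
    fn get(&self) -> Result<(), ()> {
        Ok(())
    }
}

fn purge_pass(pool: &DbPool) {
    if pool.get().is_ok() {
        println!("purging trashed ciphers");
    } else {
        eprintln!("failed to get DB connection while purging trashed ciphers");
    }
}

fn main() {
    let pool = DbPool;
    // One pass per interval; a real scheduler would loop for the
    // lifetime of the server rather than three iterations.
    for _ in 0..3 {
        purge_pass(&pool);
        thread::sleep(Duration::from_millis(10));
    }
}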
@@ -70,55 +95,57 @@ struct SyncData {
 }
 
 #[get("/sync?<data..>")]
-fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
     let user_json = headers.user.to_json(&conn);
 
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
 
     let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
-    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
+    let collections_json: Vec<Value> =
+        collections.iter().map(|c| c.to_json_details(&headers.user.uuid, &conn)).collect();
 
     let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
     let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
 
     let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
-    let ciphers_json: Vec<Value> = ciphers
-        .iter()
-        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
-        .collect();
+    let ciphers_json: Vec<Value> =
+        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
+
+    let sends = Send::find_by_user(&headers.user.uuid, &conn);
+    let sends_json: Vec<Value> = sends.iter().map(|s| s.to_json()).collect();
 
     let domains_json = if data.exclude_domains {
         Value::Null
     } else {
-        api::core::_get_eq_domains(headers, true).unwrap().into_inner()
+        api::core::_get_eq_domains(headers, true).into_inner()
     };
 
-    Ok(Json(json!({
+    Json(json!({
         "Profile": user_json,
         "Folders": folders_json,
         "Collections": collections_json,
         "Policies": policies_json,
         "Ciphers": ciphers_json,
         "Domains": domains_json,
+        "Sends": sends_json,
+        "unofficialServer": true,
         "Object": "sync"
-    })))
+    }))
 }
 
 #[get("/ciphers")]
-fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
+fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
     let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
 
-    let ciphers_json: Vec<Value> = ciphers
-        .iter()
-        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
-        .collect();
+    let ciphers_json: Vec<Value> =
+        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
 
-    Ok(Json(json!({
+    Json(json!({
         "Data": ciphers_json,
         "Object": "list",
         "ContinuationToken": null
-    })))
+    }))
 }
 
 #[get("/ciphers/<uuid>")]
@@ -174,6 +201,7 @@ pub struct CipherData {
     Identity: Option<Value>,
 
     Favorite: Option<bool>,
+    Reprompt: Option<i32>,
 
     PasswordHistory: Option<Value>,
 
@@ -181,6 +209,14 @@ pub struct CipherData {
     #[serde(rename = "Attachments")]
     _Attachments: Option<Value>, // Unused, contains map of {id: filename}
     Attachments2: Option<HashMap<String, Attachments2Data>>,
+
+    // The revision datetime (in ISO 8601 format) of the client's local copy
+    // of the cipher. This is used to prevent a client from updating a cipher
+    // when it doesn't have the latest version, as that can result in data
+    // loss. It's not an error when no value is provided; this can happen
+    // when using older client versions, or if the operation doesn't involve
+    // updating an existing cipher.
+    LastKnownRevisionDate: Option<String>,
 }
 
 #[derive(Deserialize, Debug)]
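The new `LastKnownRevisionDate` field feeds the stale-data check added to `update_cipher_from_data` further down: the ISO 8601 string is parsed and the update is rejected when the stored cipher is more than a second newer than the client's copy. A simplified standalone version of that comparison, using `parse_from_rfc3339` in place of the `%+` format string from the diff:

use chrono::{DateTime, Utc};

/// Returns true when the client's copy (identified by its last known
/// revision timestamp) is older than the revision stored on the server.
fn client_copy_is_stale(last_known: &str, stored_updated_at: DateTime<Utc>) -> bool {
    match DateTime::parse_from_rfc3339(last_known) {
        // More than one second behind the stored revision counts as stale.
        Ok(dt) => stored_updated_at.signed_duration_since(dt).num_seconds() > 1,
        // Unparseable values are only logged (not rejected) in the real code;
        // here they are simply treated as not stale.
        Err(_) => false,
    }
}

fn main() {
    let stored = Utc::now();
    assert!(!client_copy_is_stale(&stored.to_rfc3339(), stored));
}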
@@ -190,25 +226,55 @@ pub struct Attachments2Data {
     Key: String,
 }
 
+/// Called when an org admin clones an org cipher.
 #[post("/ciphers/admin", data = "<data>")]
 fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    let data: ShareCipherData = data.into_inner().data;
+    post_ciphers_create(data, headers, conn, nt)
+}
+
+/// Called when creating a new org-owned cipher, or cloning a cipher (whether
+/// user- or org-owned). When cloning a cipher to a user-owned cipher,
+/// `organizationId` is null.
+#[post("/ciphers/create", data = "<data>")]
+fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    let mut data: ShareCipherData = data.into_inner().data;
+
+    // Check if there are one more more collections selected when this cipher is part of an organization.
+    // err if this is not the case before creating an empty cipher.
+    if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
+        err!("You must select at least one collection.");
+    }
+
+    // This check is usually only needed in update_cipher_from_data(), but we
+    // need it here as well to avoid creating an empty cipher in the call to
+    // cipher.save() below.
+    enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;
 
     let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
     cipher.user_uuid = Some(headers.user.uuid.clone());
     cipher.save(&conn)?;
 
+    // When cloning a cipher, the Bitwarden clients seem to set this field
+    // based on the cipher being cloned (when creating a new cipher, it's set
+    // to null as expected). However, `cipher.created_at` is initialized to
+    // the current time, so the stale data check will end up failing down the
+    // line. Since this function only creates new ciphers (whether by cloning
+    // or otherwise), we can just ignore this field entirely.
+    data.Cipher.LastKnownRevisionDate = None;
+
     share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
 }
 
-#[post("/ciphers/create", data = "<data>")]
-fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    post_ciphers_admin(data, headers, conn, nt)
-}
-
+/// Called when creating a new user-owned cipher.
 #[post("/ciphers", data = "<data>")]
 fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    let data: CipherData = data.into_inner().data;
+    let mut data: CipherData = data.into_inner().data;
+
+    // The web/browser clients set this field to null as expected, but the
+    // mobile clients seem to set the invalid value `0001-01-01T00:00:00`,
+    // which results in a warning message being logged. This field isn't
+    // needed when creating a new cipher, so just ignore it unconditionally.
+    data.LastKnownRevisionDate = None;
 
     let mut cipher = Cipher::new(data.Type, data.Name.clone());
     update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherCreate)?;
@@ -216,6 +282,24 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
 
+/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
+/// A non-owner/admin user belonging to an org with the personal ownership policy
+/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
+/// (that were created before the policy was applicable to the user). The user is
+/// allowed to delete or share such ciphers to an org, however.
+///
+/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
+fn enforce_personal_ownership_policy(data: &CipherData, headers: &Headers, conn: &DbConn) -> EmptyResult {
+    if data.OrganizationId.is_none() {
+        let user_uuid = &headers.user.uuid;
+        let policy_type = OrgPolicyType::PersonalOwnership;
+        if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
+            err!("Due to an Enterprise Policy, you are restricted from saving items to your personal vault.")
+        }
+    }
+    Ok(())
+}
+
 pub fn update_cipher_from_data(
     cipher: &mut Cipher,
     data: CipherData,
@@ -225,19 +309,38 @@ pub fn update_cipher_from_data(
     nt: &Notify,
     ut: UpdateType,
 ) -> EmptyResult {
+    enforce_personal_ownership_policy(&data, headers, conn)?;
+
+    // Check that the client isn't updating an existing cipher with stale data.
+    if let Some(dt) = data.LastKnownRevisionDate {
+        match NaiveDateTime::parse_from_str(&dt, "%+") {
+            // ISO 8601 format
+            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
+                err!("The client copy of this cipher is out of date. Resync the client and try again.")
+            }
+            Ok(_) => (),
+        }
+    }
+
     if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
         err!("Organization mismatch. Please resync the client before updating the cipher")
     }
 
     if let Some(org_id) = data.OrganizationId {
-        match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
+        match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn) {
             None => err!("You don't have permission to add item to organization"),
             Some(org_user) => {
                 if shared_to_collection
                     || org_user.has_full_access()
-                    || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
+                    || cipher.is_write_accessible_to_user(&headers.user.uuid, conn)
                 {
                     cipher.organization_uuid = Some(org_id);
+                    // After some discussion in PR #1329 re-added the user_uuid = None again.
+                    // TODO: Audit/Check the whole save/update cipher chain.
+                    // Upstream uses the user_uuid to allow a cipher added by a user to an org to still allow the user to view/edit the cipher
+                    // even when the user has hide-passwords configured as there policy.
+                    // Removing the line below would fix that, but we have to check which effect this would have on the rest of the code.
                     cipher.user_uuid = None;
                 } else {
                     err!("You don't have permission to add cipher directly to organization")
@@ -262,7 +365,7 @@ pub fn update_cipher_from_data(
     // Modify attachments name and keys when rotating
     if let Some(attachments) = data.Attachments2 {
         for (id, attachment) in attachments {
-            let mut saved_att = match Attachment::find_by_id(&id, &conn) {
+            let mut saved_att = match Attachment::find_by_id(&id, conn) {
                 Some(att) => att,
                 None => err!("Attachment doesn't exist"),
             };
@@ -277,10 +380,24 @@ pub fn update_cipher_from_data(
             saved_att.akey = Some(attachment.Key);
             saved_att.file_name = attachment.FileName;
 
-            saved_att.save(&conn)?;
+            saved_att.save(conn)?;
         }
     }
 
+    // Cleanup cipher data, like removing the 'Response' key.
+    // This key is somewhere generated during Javascript so no way for us this fix this.
+    // Also, upstream only retrieves keys they actually want to store, and thus skip the 'Response' key.
+    // We do not mind which data is in it, the keep our model more flexible when there are upstream changes.
+    // But, we at least know we do not need to store and return this specific key.
+    fn _clean_cipher_data(mut json_data: Value) -> Value {
+        if json_data.is_array() {
+            json_data.as_array_mut().unwrap().iter_mut().for_each(|ref mut f| {
+                f.as_object_mut().unwrap().remove("Response");
+            });
+        };
+        json_data
+    }
+
     let type_data_opt = match data.Type {
         1 => data.Login,
         2 => data.SecureNote,
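The `_clean_cipher_data` helper above strips the `Response` key from every element of a JSON array before the cipher data is stored. A small usage sketch with `serde_json`, written slightly more defensively than the unwrap-based helper but with the same effect on the kind of input it receives (an array of objects such as a cipher's `Uris` list):

use serde_json::{json, Value};

fn clean_cipher_data(mut json_data: Value) -> Value {
    // Only arrays are touched; each object element loses its "Response" key.
    if let Some(items) = json_data.as_array_mut() {
        for item in items.iter_mut() {
            if let Some(obj) = item.as_object_mut() {
                obj.remove("Response");
            }
        }
    }
    json_data
}

fn main() {
    let uris = json!([{ "Uri": "https://example.com", "Response": null }]);
    let cleaned = clean_cipher_data(uris);
    assert_eq!(cleaned, json!([{ "Uri": "https://example.com" }]));
}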
@@ -289,32 +406,32 @@ pub fn update_cipher_from_data(
|
|||||||
_ => err!("Invalid type"),
|
_ => err!("Invalid type"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut type_data = match type_data_opt {
|
let type_data = match type_data_opt {
|
||||||
Some(data) => data,
|
Some(mut data) => {
|
||||||
|
// Remove the 'Response' key from the base object.
|
||||||
|
data.as_object_mut().unwrap().remove("Response");
|
||||||
|
// Remove the 'Response' key from every Uri.
|
||||||
|
if data["Uris"].is_array() {
|
||||||
|
data["Uris"] = _clean_cipher_data(data["Uris"].clone());
|
||||||
|
}
|
||||||
|
data
|
||||||
|
}
|
||||||
None => err!("Data missing"),
|
None => err!("Data missing"),
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO: ******* Backwards compat start **********
|
|
||||||
// To remove backwards compatibility, just delete this code,
|
|
||||||
// and remove the compat code from cipher::to_json
|
|
||||||
type_data["Name"] = Value::String(data.Name.clone());
|
|
||||||
type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
|
|
||||||
type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
|
|
||||||
type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
|
|
||||||
// TODO: ******* Backwards compat end **********
|
|
||||||
|
|
||||||
cipher.name = data.Name;
|
cipher.name = data.Name;
|
||||||
cipher.notes = data.Notes;
|
cipher.notes = data.Notes;
|
||||||
cipher.fields = data.Fields.map(|f| f.to_string());
|
cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
|
||||||
cipher.data = type_data.to_string();
|
cipher.data = type_data.to_string();
|
||||||
cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
|
cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
|
||||||
|
cipher.reprompt = data.Reprompt;
|
||||||
|
|
||||||
cipher.save(&conn)?;
|
cipher.save(conn)?;
|
||||||
cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
|
cipher.move_to_folder(data.FolderId, &headers.user.uuid, conn)?;
|
||||||
cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;
|
cipher.set_favorite(data.Favorite, &headers.user.uuid, conn)?;
|
||||||
|
|
||||||
if ut != UpdateType::None {
|
if ut != UpdateType::None {
|
||||||
nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
|
nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -374,6 +491,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
     Ok(())
 }

+/// Called when an org admin modifies an existing org cipher.
 #[put("/ciphers/<uuid>/admin", data = "<data>")]
 fn put_cipher_admin(
     uuid: String,
@@ -479,14 +597,11 @@ fn post_collections_admin(
     }

     let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect();
-    let current_collections: HashSet<String> = cipher
-        .get_collections(&headers.user.uuid, &conn)
-        .iter()
-        .cloned()
-        .collect();
+    let current_collections: HashSet<String> =
+        cipher.get_collections(&headers.user.uuid, &conn).iter().cloned().collect();

     for collection in posted_collections.symmetric_difference(&current_collections) {
-        match Collection::find_by_uuid(&collection, &conn) {
+        match Collection::find_by_uuid(collection, &conn) {
             None => err!("Invalid collection ID provided"),
             Some(collection) => {
                 if collection.is_writable_by_user(&headers.user.uuid, &conn) {
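The collection-sync logic in this hunk relies on HashSet::symmetric_difference to find which collections need to be linked or unlinked. A minimal std-only sketch of that pattern (identifiers made up for illustration):

    use std::collections::HashSet;

    fn main() {
        let posted: HashSet<String> = ["a", "b"].iter().map(|s| s.to_string()).collect();
        let current: HashSet<String> = ["b", "c"].iter().map(|s| s.to_string()).collect();

        // Collections present in exactly one of the two sets: "a" must be
        // linked, "c" must be unlinked. Membership in `posted` distinguishes
        // the two cases.
        for id in posted.symmetric_difference(&current) {
            if posted.contains(id) {
                println!("link cipher to collection {}", id);
            } else {
                println!("unlink cipher from collection {}", id);
            }
        }
    }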
@@ -548,7 +663,7 @@ struct ShareSelectedCipherData {
 }

 #[put("/ciphers/share", data = "<data>")]
-fn put_cipher_share_seleted(
+fn put_cipher_share_selected(
     data: JsonUpcase<ShareSelectedCipherData>,
     headers: Headers,
     conn: DbConn,
@@ -600,9 +715,9 @@ fn share_cipher_by_uuid(
     conn: &DbConn,
     nt: &Notify,
 ) -> JsonResult {
-    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+    let mut cipher = match Cipher::find_by_uuid(uuid, conn) {
         Some(cipher) => {
-            if cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+            if cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
                 cipher
             } else {
                 err!("Cipher is not write accessible")
@@ -619,11 +734,11 @@ fn share_cipher_by_uuid(
         None => {}
         Some(organization_uuid) => {
             for uuid in &data.CollectionIds {
-                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
+                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, conn) {
                     None => err!("Invalid collection ID provided"),
                     Some(collection) => {
-                        if collection.is_writable_by_user(&headers.user.uuid, &conn) {
+                        if collection.is_writable_by_user(&headers.user.uuid, conn) {
-                            CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn)?;
+                            CollectionCipher::save(&cipher.uuid, &collection.uuid, conn)?;
                             shared_to_collection = true;
                         } else {
                             err!("No rights to modify the collection")
@@ -637,45 +752,126 @@ fn share_cipher_by_uuid(
     update_cipher_from_data(
         &mut cipher,
         data.Cipher,
-        &headers,
+        headers,
         shared_to_collection,
-        &conn,
+        conn,
-        &nt,
+        nt,
         UpdateType::CipherUpdate,
     )?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn)))
 }

-#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
-fn post_attachment(
+/// v2 API for downloading an attachment. This just redirects the client to
+/// the actual location of an attachment.
+///
+/// Upstream added this v2 API to support direct download of attachments from
+/// their object storage service. For self-hosted instances, it basically just
+/// redirects to the same location as before the v2 API.
+#[get("/ciphers/<uuid>/attachment/<attachment_id>")]
+fn get_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+    match Attachment::find_by_id(&attachment_id, &conn) {
+        Some(attachment) if uuid == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
+        Some(_) => err!("Attachment doesn't belong to cipher"),
+        None => err!("Attachment doesn't exist"),
+    }
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct AttachmentRequestData {
+    Key: String,
+    FileName: String,
+    FileSize: i32,
+    // We check org owner/admin status via is_write_accessible_to_user(),
+    // so we can just ignore this field.
+    //
+    // AdminRequest: bool,
+}
+
+enum FileUploadType {
+    Direct = 0,
+    // Azure = 1, // only used upstream
+}
+
+/// v2 API for creating an attachment associated with a cipher.
+/// This redirects the client to the API it should use to upload the attachment.
+/// For upstream's cloud-hosted service, it's an Azure object storage API.
+/// For self-hosted instances, it's another API on the local instance.
+#[post("/ciphers/<uuid>/attachment/v2", data = "<data>")]
+fn post_attachment_v2(
     uuid: String,
-    data: Data,
+    data: JsonUpcase<AttachmentRequestData>,
-    content_type: &ContentType,
     headers: Headers,
     conn: DbConn,
-    nt: Notify,
 ) -> JsonResult {
     let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+        Some(cipher) => cipher,
+        None => err!("Cipher doesn't exist"),
+    };
+
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+        err!("Cipher is not write accessible")
+    }
+
+    let attachment_id = crypto::generate_attachment_id();
+    let data: AttachmentRequestData = data.into_inner().data;
+    let attachment =
+        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
+    attachment.save(&conn).expect("Error saving attachment");
+
+    let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
+
+    Ok(Json(json!({ // AttachmentUploadDataResponseModel
+        "Object": "attachment-fileUpload",
+        "AttachmentId": attachment_id,
+        "Url": url,
+        "FileUploadType": FileUploadType::Direct as i32,
+        "CipherResponse": cipher.to_json(&headers.host, &headers.user.uuid, &conn),
+        "CipherMiniResponse": null,
+    })))
+}

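A rough sketch of how a client might act on the JSON returned by the v2 creation route above; the field names are taken from the response shown in the diff, while the helper itself is hypothetical:

    use serde_json::Value;

    // Decide the next upload step from the AttachmentUploadDataResponseModel
    // JSON: FileUploadType 0 (Direct) means "POST the multipart body to Url";
    // type 1 (Azure) is only used by the upstream cloud service.
    fn next_upload_step(resp: &Value) -> Result<String, String> {
        match resp["FileUploadType"].as_i64() {
            Some(0) => resp["Url"]
                .as_str()
                .map(|u| u.to_string())
                .ok_or_else(|| "missing Url".to_string()),
            Some(other) => Err(format!("unsupported FileUploadType {}", other)),
            None => Err("missing FileUploadType".to_string()),
        }
    }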
+/// Saves the data content of an attachment to a file. This is common code
+/// shared between the v2 and legacy attachment APIs.
+///
+/// When used with the legacy API, this function is responsible for creating
+/// the attachment database record, so `attachment` is None.
+///
+/// When used with the v2 API, post_attachment_v2() has already created the
+/// database record, which is passed in as `attachment`.
+fn save_attachment(
+    mut attachment: Option<Attachment>,
+    cipher_uuid: String,
+    data: Data,
+    content_type: &ContentType,
+    headers: &Headers,
+    conn: &DbConn,
+    nt: Notify,
+) -> Result<Cipher, crate::error::Error> {
+    let cipher = match Cipher::find_by_uuid(&cipher_uuid, conn) {
         Some(cipher) => cipher,
         None => err_discard!("Cipher doesn't exist", data),
     };

-    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
         err_discard!("Cipher is not write accessible", data)
     }

-    let mut params = content_type.params();
-    let boundary_pair = params.next().expect("No boundary provided");
-    let boundary = boundary_pair.1;
+    // In the v2 API, the attachment record has already been created,
+    // so the size limit needs to be adjusted to account for that.
+    let size_adjust = match &attachment {
+        None => 0,                     // Legacy API
+        Some(a) => a.file_size as i64, // v2 API
+    };

     let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
         match CONFIG.user_attachment_limit() {
             Some(0) => err_discard!("Attachments are disabled", data),
             Some(limit_kb) => {
-                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
+                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, conn) + size_adjust;
                 if left <= 0 {
-                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
+                    err_discard!("Attachment storage limit reached! Delete some attachments to free up space", data)
                 }
                 Some(left as u64)
             }
@@ -685,9 +881,9 @@ fn post_attachment(
             match CONFIG.org_attachment_limit() {
                 Some(0) => err_discard!("Attachments are disabled", data),
                 Some(limit_kb) => {
-                    let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
+                    let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, conn) + size_adjust;
                     if left <= 0 {
-                        err_discard!("Attachment size limit reached! Delete some files to open space", data)
+                        err_discard!("Attachment storage limit reached! Delete some attachments to free up space", data)
                     }
                     Some(left as u64)
                 }
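The size_adjust term above exists because the v2 flow has already stored an attachment record whose declared size counts toward the per-user or per-org total; adding it back avoids charging the pending upload against itself. A standalone sketch of the quota arithmetic under that assumption (not the project's actual function):

    // Remaining attachment quota in bytes. `already_used` is the total stored
    // size (which, for the v2 API, already includes the pre-created record),
    // and `precreated_record` is that record's declared size, added back so it
    // is not counted twice. Returns None when uploads are disabled or no space
    // is left.
    fn remaining_quota(limit_kb: Option<i64>, already_used: i64, precreated_record: i64) -> Option<u64> {
        match limit_kb {
            Some(0) => None, // attachments disabled
            Some(kb) => {
                let left = kb * 1024 - already_used + precreated_record;
                if left > 0 {
                    Some(left as u64)
                } else {
                    None
                }
            }
            None => Some(u64::MAX), // no limit configured
        }
    }

    fn main() {
        // 100 KiB limit, 90 KiB already used, 20 KiB of that is the pre-created record.
        assert_eq!(remaining_quota(Some(100), 90 * 1024, 20 * 1024), Some(30 * 1024));
    }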
@@ -697,7 +893,12 @@ fn post_attachment(
         err_discard!("Cipher is neither owned by a user nor an organization", data);
     };

-    let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher.uuid);
+    let mut params = content_type.params();
+    let boundary_pair = params.next().expect("No boundary provided");
+    let boundary = boundary_pair.1;
+
+    let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher_uuid);
+    let mut path = PathBuf::new();

     let mut attachment_key = None;
     let mut error = None;
@@ -713,34 +914,81 @@ fn post_attachment(
                     }
                 }
                 "data" => {
-                    // This is provided by the client, don't trust it
-                    let name = field.headers.filename.expect("No filename provided");
+                    // In the legacy API, this is the encrypted filename
+                    // provided by the client, stored to the database as-is.
+                    // In the v2 API, this value doesn't matter, as it was
+                    // already provided and stored via an earlier API call.
+                    let encrypted_filename = field.headers.filename;

-                    let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
-                    let path = base_path.join(&file_name);
+                    // This random ID is used as the name of the file on disk.
+                    // In the legacy API, we need to generate this value here.
+                    // In the v2 API, we use the value from post_attachment_v2().
+                    let file_id = match &attachment {
+                        Some(attachment) => attachment.id.clone(), // v2 API
+                        None => crypto::generate_attachment_id(),  // Legacy API
+                    };
+                    path = base_path.join(&file_id);

-                    let size = match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
-                        SaveResult::Full(SavedData::File(_, size)) => size as i32,
-                        SaveResult::Full(other) => {
-                            std::fs::remove_file(path).ok();
-                            error = Some(format!("Attachment is not a file: {:?}", other));
-                            return;
-                        }
-                        SaveResult::Partial(_, reason) => {
-                            std::fs::remove_file(path).ok();
-                            error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
-                            return;
-                        }
-                        SaveResult::Error(e) => {
-                            std::fs::remove_file(path).ok();
-                            error = Some(format!("Error: {:?}", e));
-                            return;
-                        }
-                    };
+                    let size =
+                        match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
+                            SaveResult::Full(SavedData::File(_, size)) => size as i32,
+                            SaveResult::Full(other) => {
+                                error = Some(format!("Attachment is not a file: {:?}", other));
+                                return;
+                            }
+                            SaveResult::Partial(_, reason) => {
+                                error = Some(format!("Attachment storage limit exceeded with this file: {:?}", reason));
+                                return;
+                            }
+                            SaveResult::Error(e) => {
+                                error = Some(format!("Error: {:?}", e));
+                                return;
+                            }
+                        };

-                    let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
-                    attachment.akey = attachment_key.clone();
-                    attachment.save(&conn).expect("Error saving attachment");
+                    if let Some(attachment) = &mut attachment {
+                        // v2 API
+
+                        // Check the actual size against the size initially provided by
+                        // the client. Upstream allows +/- 1 MiB deviation from this
+                        // size, but it's not clear when or why this is needed.
+                        const LEEWAY: i32 = 1024 * 1024; // 1 MiB
+                        let min_size = attachment.file_size - LEEWAY;
+                        let max_size = attachment.file_size + LEEWAY;
+
+                        if min_size <= size && size <= max_size {
+                            if size != attachment.file_size {
+                                // Update the attachment with the actual file size.
+                                attachment.file_size = size;
+                                attachment.save(conn).expect("Error updating attachment");
+                            }
+                        } else {
+                            attachment.delete(conn).ok();
+
+                            let err_msg = "Attachment size mismatch".to_string();
+                            error!("{} (expected within [{}, {}], got {})", err_msg, min_size, max_size, size);
+                            error = Some(err_msg);
+                        }
+                    } else {
+                        // Legacy API
+
+                        if encrypted_filename.is_none() {
+                            error = Some("No filename provided".to_string());
+                            return;
+                        }
+                        if attachment_key.is_none() {
+                            error = Some("No attachment key provided".to_string());
+                            return;
+                        }
+                        let attachment = Attachment::new(
+                            file_id,
+                            cipher_uuid.clone(),
+                            encrypted_filename.unwrap(),
+                            size,
+                            attachment_key.clone(),
+                        );
+                        attachment.save(conn).expect("Error saving attachment");
+                    }
                 }
                 _ => error!("Invalid multipart name"),
             }
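The v2 branch validates the uploaded size against the size declared when the record was created, allowing 1 MiB of slack in either direction. The same check as an isolated, illustrative function:

    // Accept the upload if the on-disk size is within 1 MiB of the size the
    // client declared earlier (mirrors the v2 branch above; this helper is
    // illustrative, not part of the codebase).
    fn size_within_leeway(declared: i32, actual: i32) -> bool {
        const LEEWAY: i32 = 1024 * 1024; // 1 MiB
        let min = declared - LEEWAY;
        let max = declared + LEEWAY;
        min <= actual && actual <= max
    }

    fn main() {
        assert!(size_within_leeway(5_000_000, 5_100_000));  // ~100 KB over: accepted
        assert!(!size_within_leeway(5_000_000, 7_000_000)); // 2 MB over: rejected
    }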
@@ -748,10 +996,55 @@ fn post_attachment(
         .expect("Error processing multipart data");

     if let Some(ref e) = error {
+        std::fs::remove_file(path).ok();
         err!(e);
     }

-    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));

+    Ok(cipher)
+}
+
+/// v2 API for uploading the actual data content of an attachment.
+/// This route needs a rank specified so that Rocket prioritizes the
+/// /ciphers/<uuid>/attachment/v2 route, which would otherwise conflict
+/// with this one.
+#[post("/ciphers/<uuid>/attachment/<attachment_id>", format = "multipart/form-data", data = "<data>", rank = 1)]
+fn post_attachment_v2_data(
+    uuid: String,
+    attachment_id: String,
+    data: Data,
+    content_type: &ContentType,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> EmptyResult {
+    let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
+        Some(attachment) if uuid == attachment.cipher_uuid => Some(attachment),
+        Some(_) => err!("Attachment doesn't belong to cipher"),
+        None => err!("Attachment doesn't exist"),
+    };
+
+    save_attachment(attachment, uuid, data, content_type, &headers, &conn, nt)?;
+
+    Ok(())
+}
+
+/// Legacy API for creating an attachment associated with a cipher.
+#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
+fn post_attachment(
+    uuid: String,
+    data: Data,
+    content_type: &ContentType,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> JsonResult {
+    // Setting this as None signifies to save_attachment() that it should create
+    // the attachment database record as well as saving the data to disk.
+    let attachment = None;
+
+    let cipher = save_attachment(attachment, uuid, data, content_type, &headers, &conn, nt)?;
+
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
@@ -862,22 +1155,47 @@ fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn:

 #[put("/ciphers/delete", data = "<data>")]
 fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_multiple_ciphers(data, headers, conn, true, nt)
+    _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
+}
+
+#[delete("/ciphers/admin", data = "<data>")]
+fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    delete_cipher_selected(data, headers, conn, nt)
+}
+
+#[post("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_post_admin(
+    data: JsonUpcase<Value>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> EmptyResult {
+    delete_cipher_selected_post(data, headers, conn, nt)
+}
+
+#[put("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_put_admin(
+    data: JsonUpcase<Value>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> EmptyResult {
+    delete_cipher_selected_put(data, headers, conn, nt)
 }

 #[put("/ciphers/<uuid>/restore")]
-fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
 }

 #[put("/ciphers/<uuid>/restore-admin")]
-fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
 }

 #[put("/ciphers/restore", data = "<data>")]
-fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    _restore_multiple_ciphers(data, headers, conn, nt)
+    _restore_multiple_ciphers(data, &headers, &conn, &nt)
 }

 #[derive(Deserialize)]
@@ -963,7 +1281,6 @@ fn delete_all(
             Some(user_org) => {
                 if user_org.atype == UserOrgType::Owner {
                     Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
-                    Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                     nt.send_user_update(UpdateType::Vault, &user);
                     Ok(())
                 } else {
@@ -992,28 +1309,34 @@ fn delete_all(
 }

 fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
-    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+    let mut cipher = match Cipher::find_by_uuid(uuid, conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
     };

-    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
         err!("Cipher can't be deleted by user")
     }

     if soft_delete {
-        cipher.deleted_at = Some(chrono::Utc::now().naive_utc());
+        cipher.deleted_at = Some(Utc::now().naive_utc());
-        cipher.save(&conn)?;
+        cipher.save(conn)?;
-        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));
     } else {
-        cipher.delete(&conn)?;
+        cipher.delete(conn)?;
-        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
+        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(conn));
     }

     Ok(())
 }

-fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
+fn _delete_multiple_ciphers(
+    data: JsonUpcase<Value>,
+    headers: Headers,
+    conn: DbConn,
+    soft_delete: bool,
+    nt: Notify,
+) -> EmptyResult {
     let data: Value = data.into_inner().data;

     let uuids = match data.get("Ids") {
@@ -1033,24 +1356,24 @@ fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbC
     Ok(())
 }

-fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
+fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
-    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+    let mut cipher = match Cipher::find_by_uuid(uuid, conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
     };

-    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
         err!("Cipher can't be restored by user")
     }

     cipher.deleted_at = None;
-    cipher.save(&conn)?;
+    cipher.save(conn)?;

-    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));
-    Ok(())
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn)))
 }

-fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
     let data: Value = data.into_inner().data;

     let uuids = match data.get("Ids") {
@@ -1061,13 +1384,19 @@ fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: Db
         None => err!("Request missing ids field"),
     };

+    let mut ciphers: Vec<Value> = Vec::new();
     for uuid in uuids {
-        if let error @ Err(_) = _restore_cipher_by_uuid(uuid, &headers, &conn, &nt) {
-            return error;
-        };
+        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
+            Ok(json) => ciphers.push(json.into_inner()),
+            err => return err,
+        }
     }

-    Ok(())
+    Ok(Json(json!({
+        "Data": ciphers,
+        "Object": "list",
+        "ContinuationToken": null
+    })))
 }

 fn _delete_cipher_attachment_by_id(
@@ -1077,7 +1406,7 @@ fn _delete_cipher_attachment_by_id(
     conn: &DbConn,
     nt: &Notify,
 ) -> EmptyResult {
-    let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
+    let attachment = match Attachment::find_by_id(attachment_id, conn) {
         Some(attachment) => attachment,
         None => err!("Attachment doesn't exist"),
     };
@@ -1086,17 +1415,17 @@ fn _delete_cipher_attachment_by_id(
         err!("Attachment from other cipher")
     }

-    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+    let cipher = match Cipher::find_by_uuid(uuid, conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
     };

-    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
         err!("Cipher cannot be deleted by user")
     }

     // Delete attachment
-    attachment.delete(&conn)?;
+    attachment.delete(conn)?;
-    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));
     Ok(())
 }
src/api/core/emergency_access.rs (new file, 24 lines)
@@ -0,0 +1,24 @@
+use rocket::Route;
+use rocket_contrib::json::Json;
+
+use crate::{api::JsonResult, auth::Headers, db::DbConn};
+
+pub fn routes() -> Vec<Route> {
+    routes![get_contacts,]
+}
+
+/// This endpoint is expected to return at least something.
+/// If we return an error message that will trigger error toasts for the user.
+/// To prevent this we just return an empty json result with no Data.
+/// When this feature is going to be implemented it also needs to return this empty Data
+/// instead of throwing an error/4XX unless it really is an error.
+#[get("/emergency-access/trusted")]
+fn get_contacts(_headers: Headers, _conn: DbConn) -> JsonResult {
+    debug!("Emergency access is not supported.");
+
+    Ok(Json(json!({
+        "Data": [],
+        "Object": "list",
+        "ContinuationToken": null
+    })))
+}
src/api/core/folders.rs
@@ -8,28 +8,20 @@ use crate::{
 };

 pub fn routes() -> Vec<rocket::Route> {
-    routes![
-        get_folders,
-        get_folder,
-        post_folders,
-        post_folder,
-        put_folder,
-        delete_folder_post,
-        delete_folder,
-    ]
+    routes![get_folders, get_folder, post_folders, post_folder, put_folder, delete_folder_post, delete_folder,]
 }

 #[get("/folders")]
-fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
+fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);

     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

-    Ok(Json(json!({
+    Json(json!({
         "Data": folders_json,
         "Object": "list",
         "ContinuationToken": null,
-    })))
+    }))
 }

 #[get("/folders/<uuid>")]
src/api/core/mod.rs
@@ -1,25 +1,26 @@
 mod accounts;
 mod ciphers;
+mod emergency_access;
 mod folders;
 mod organizations;
+mod sends;
 pub mod two_factor;

+pub use ciphers::purge_trashed_ciphers;
+pub use sends::purge_sends;
+
 pub fn routes() -> Vec<Route> {
-    let mut mod_routes = routes![
-        clear_device_token,
-        put_device_token,
-        get_eq_domains,
-        post_eq_domains,
-        put_eq_domains,
-        hibp_breach,
-    ];
+    let mut mod_routes =
+        routes![clear_device_token, put_device_token, get_eq_domains, post_eq_domains, put_eq_domains, hibp_breach,];

     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
     routes.append(&mut ciphers::routes());
+    routes.append(&mut emergency_access::routes());
     routes.append(&mut folders::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
+    routes.append(&mut sends::routes());
     routes.append(&mut mod_routes);

     routes
@@ -33,14 +34,15 @@ use rocket_contrib::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase},
+    api::{JsonResult, JsonUpcase},
     auth::Headers,
     db::DbConn,
     error::Error,
+    util::get_reqwest_client,
 };

 #[put("/devices/identifier/<uuid>/clear-token")]
-fn clear_device_token(uuid: String) -> EmptyResult {
+fn clear_device_token(uuid: String) -> &'static str {
     // This endpoint doesn't have auth header

     let _ = uuid;
@@ -49,11 +51,11 @@ fn clear_device_token(uuid: String) -> EmptyResult {
     // This only clears push token
     // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
     // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
-    Ok(())
+    ""
 }

 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
-fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> JsonResult {
+fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
     let _data: Value = data.into_inner().data;
     // Data has a single string value "PushToken"
     let _ = uuid;
@@ -61,13 +63,13 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->

     // TODO: This should save the push token, but we don't have push functionality

-    Ok(Json(json!({
+    Json(json!({
         "Id": headers.device.uuid,
         "Name": headers.device.name,
         "Type": headers.device.atype,
         "Identifier": headers.device.uuid,
         "CreationDate": crate::util::format_date(&headers.device.created_at),
-    })))
+    }))
 }

 #[derive(Serialize, Deserialize, Debug)]
@@ -81,11 +83,11 @@ struct GlobalDomain {
 const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");

 #[get("/settings/domains")]
-fn get_eq_domains(headers: Headers) -> JsonResult {
+fn get_eq_domains(headers: Headers) -> Json<Value> {
     _get_eq_domains(headers, false)
 }

-fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
+fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
     let user = headers.user;
     use serde_json::from_str;

@@ -102,11 +104,11 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
         globals.retain(|g| !g.Excluded);
     }

-    Ok(Json(json!({
+    Json(json!({
         "EquivalentDomains": equivalent_domains,
         "GlobalEquivalentDomains": globals,
         "Object": "domains",
-    })))
+    }))
 }

 #[derive(Deserialize, Debug)]
@@ -141,22 +143,15 @@ fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbC

 #[get("/hibp/breach?<username>")]
 fn hibp_breach(username: String) -> JsonResult {
-    let user_agent = "Bitwarden_RS";
     let url = format!(
         "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
         username
     );

-    use reqwest::{blocking::Client, header::USER_AGENT};
-
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = Client::builder().build()?;
-
-        let res = hibp_client
-            .get(&url)
-            .header(USER_AGENT, user_agent)
-            .header("hibp-api-key", api_key)
-            .send()?;
+        let hibp_client = get_reqwest_client();
+
+        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?;

         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {
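get_reqwest_client() centralizes HTTP client construction, so call sites such as hibp_breach() no longer set a User-Agent by hand. A plausible sketch of what such a helper could look like; the actual util.rs implementation may differ (for example in the exact User-Agent string and timeouts):

    use reqwest::blocking::{Client, ClientBuilder};

    // Build one shared blocking client with a default User-Agent and timeout.
    fn build_http_client() -> Client {
        ClientBuilder::new()
            .user_agent("Bitwarden_RS")
            .timeout(std::time::Duration::from_secs(10))
            .build()
            .expect("Failed to build HTTP client")
    }

    fn main() {
        let client = build_http_client();
        // Every request sent through this client carries the default User-Agent.
        let _req = client.get("https://example.com").build().unwrap();
    }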
@@ -172,7 +167,7 @@ fn hibp_breach(username: String) -> JsonResult {
             "Domain": "haveibeenpwned.com",
             "BreachDate": "2019-08-18T00:00:00Z",
             "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
+            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
             "LogoPath": "bwrs_static/hibp.png",
             "PwnCount": 0,
             "DataClasses": [
src/api/core/organizations.rs
@@ -5,7 +5,7 @@ use serde_json::Value;

 use crate::{
     api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
-    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders},
+    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
     db::{models::*, DbConn},
     mail, CONFIG,
 };
@@ -47,7 +47,11 @@ pub fn routes() -> Vec<Route> {
         list_policies_token,
         get_policy,
         put_policy,
+        get_organization_tax,
         get_plans,
+        get_plans_tax_rates,
+        import,
+        post_org_keys,
     ]
 }

@@ -58,6 +62,7 @@ struct OrgData {
     CollectionName: String,
     Key: String,
     Name: String,
+    Keys: Option<OrgKeyData>,
     #[serde(rename = "PlanType")]
     _PlanType: NumberOrString, // Ignored, always use the same plan
 }
@@ -75,6 +80,13 @@ struct NewCollectionData {
     Name: String,
 }

+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct OrgKeyData {
+    EncryptedPrivateKey: String,
+    PublicKey: String,
+}
+
 #[post("/organizations", data = "<data>")]
 fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
     if !CONFIG.is_org_creation_allowed(&headers.user.email) {
@@ -82,8 +94,14 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
     }

     let data: OrgData = data.into_inner().data;
+    let (private_key, public_key) = if data.Keys.is_some() {
+        let keys: OrgKeyData = data.Keys.unwrap();
+        (Some(keys.EncryptedPrivateKey), Some(keys.PublicKey))
+    } else {
+        (None, None)
+    };

-    let org = Organization::new(data.Name, data.BillingEmail);
+    let org = Organization::new(data.Name, data.BillingEmail, private_key, public_key);
     let mut user_org = UserOrganization::new(headers.user.uuid, org.uuid.clone());
     let collection = Collection::new(org.uuid.clone(), data.CollectionName);

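The Keys handling added to create_organization() above can also be written without unwrap(); a small self-contained illustration of the equivalent Option handling (the struct only mirrors OrgKeyData from the diff, with lowercased field names, and the helper is not part of the codebase):

    struct OrgKeyData {
        encrypted_private_key: String,
        public_key: String,
    }

    // Turn an optional key pair into two Options in one step with map_or;
    // functionally the same as the if/else shown in the diff.
    fn split_keys(keys: Option<OrgKeyData>) -> (Option<String>, Option<String>) {
        keys.map_or((None, None), |k| (Some(k.encrypted_private_key), Some(k.public_key)))
    }

    fn main() {
        let (priv_key, pub_key) = split_keys(None);
        assert!(priv_key.is_none() && pub_key.is_none());
    }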
@@ -189,8 +207,8 @@ fn post_organization(

 // GET /api/collections?writeOnly=false
 #[get("/collections")]
-fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
+fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
-    Ok(Json(json!({
+    Json(json!({
         "Data":
             Collection::find_by_user_uuid(&headers.user.uuid, &conn)
             .iter()
@@ -198,12 +216,12 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
             .collect::<Value>(),
         "Object": "list",
         "ContinuationToken": null,
-    })))
+    }))
 }

 #[get("/organizations/<org_id>/collections")]
-fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
-    Ok(Json(json!({
+    Json(json!({
         "Data":
             Collection::find_by_organization(&org_id, &conn)
             .iter()
@@ -211,13 +229,13 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
             .collect::<Value>(),
         "Object": "list",
         "ContinuationToken": null,
-    })))
+    }))
 }

 #[post("/organizations/<org_id>/collections", data = "<data>")]
 fn post_organization_collections(
     org_id: String,
-    _headers: AdminHeaders,
+    headers: ManagerHeadersLoose,
     data: JsonUpcase<NewCollectionData>,
     conn: DbConn,
 ) -> JsonResult {
@@ -228,9 +246,22 @@ fn post_organization_collections(
         None => err!("Can't find organization details"),
     };

+    // Get the user_organization record so that we can check if the user has access to all collections.
+    let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
+        Some(u) => u,
+        None => err!("User is not part of organization"),
+    };
+
     let collection = Collection::new(org.uuid, data.Name);
     collection.save(&conn)?;

+    // If the user doesn't have access to all collections, only in case of a Manger,
+    // then we need to save the creating user uuid (Manager) to the users_collection table.
+    // Else the user will not have access to his own created collection.
+    if !user_org.access_all {
+        CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?;
+    }
+
     Ok(Json(collection.to_json()))
 }

@@ -238,7 +269,7 @@ fn post_organization_collections(
|
|||||||
fn put_organization_collection_update(
|
fn put_organization_collection_update(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
col_id: String,
|
col_id: String,
|
||||||
headers: AdminHeaders,
|
headers: ManagerHeaders,
|
||||||
data: JsonUpcase<NewCollectionData>,
|
data: JsonUpcase<NewCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
@@ -249,7 +280,7 @@ fn put_organization_collection_update(
|
|||||||
fn post_organization_collection_update(
|
fn post_organization_collection_update(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
col_id: String,
|
col_id: String,
|
||||||
_headers: AdminHeaders,
|
_headers: ManagerHeaders,
|
||||||
data: JsonUpcase<NewCollectionData>,
|
data: JsonUpcase<NewCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
@@ -317,7 +348,12 @@ fn post_organization_collection_delete_user(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/organizations/<org_id>/collections/<col_id>")]
|
#[delete("/organizations/<org_id>/collections/<col_id>")]
|
||||||
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
|
fn delete_organization_collection(
|
||||||
|
org_id: String,
|
||||||
|
col_id: String,
|
||||||
|
_headers: ManagerHeaders,
|
||||||
|
conn: DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
match Collection::find_by_uuid(&col_id, &conn) {
|
match Collection::find_by_uuid(&col_id, &conn) {
|
||||||
None => err!("Collection not found"),
|
None => err!("Collection not found"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
@@ -341,7 +377,7 @@ struct DeleteCollectionData {
|
|||||||
fn post_organization_collection_delete(
|
fn post_organization_collection_delete(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
col_id: String,
|
col_id: String,
|
||||||
headers: AdminHeaders,
|
headers: ManagerHeaders,
|
||||||
_data: JsonUpcase<DeleteCollectionData>,
|
_data: JsonUpcase<DeleteCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
@@ -349,7 +385,7 @@ fn post_organization_collection_delete(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/collections/<coll_id>/details")]
|
#[get("/organizations/<org_id>/collections/<coll_id>/details")]
|
||||||
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult {
|
||||||
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
|
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
|
||||||
None => err!("Collection not found"),
|
None => err!("Collection not found"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
@@ -363,7 +399,7 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHead
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/collections/<coll_id>/users")]
|
#[get("/organizations/<org_id>/collections/<coll_id>/users")]
|
||||||
fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult {
|
||||||
// Get org and collection, check that collection is from org
|
// Get org and collection, check that collection is from org
|
||||||
let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
|
let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
|
||||||
None => err!("Collection not found in Organization"),
|
None => err!("Collection not found in Organization"),
|
||||||
@@ -376,7 +412,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
|
|||||||
.map(|col_user| {
|
.map(|col_user| {
|
||||||
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
|
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.to_json_user_access_restrictions(&col_user)
|
.to_json_user_access_restrictions(col_user)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
@@ -388,7 +424,7 @@ fn put_collection_users(
|
|||||||
org_id: String,
|
org_id: String,
|
||||||
coll_id: String,
|
coll_id: String,
|
||||||
data: JsonUpcaseVec<CollectionData>,
|
data: JsonUpcaseVec<CollectionData>,
|
||||||
_headers: AdminHeaders,
|
_headers: ManagerHeaders,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
// Get org and collection, check that collection is from org
|
// Get org and collection, check that collection is from org
|
||||||
@@ -410,9 +446,7 @@ fn put_collection_users(
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
CollectionUser::save(&user.user_uuid, &coll_id,
|
CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, d.HidePasswords, &conn)?;
|
||||||
d.ReadOnly, d.HidePasswords,
|
|
||||||
&conn)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -425,29 +459,53 @@ struct OrgIdData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/ciphers/organization-details?<data..>")]
|
#[get("/ciphers/organization-details?<data..>")]
|
||||||
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> JsonResult {
|
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Json<Value> {
|
||||||
let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
|
let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
|
||||||
let ciphers_json: Vec<Value> = ciphers
|
let ciphers_json: Vec<Value> =
|
||||||
.iter()
|
ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
|
||||||
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": ciphers_json,
|
"Data": ciphers_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null,
|
"ContinuationToken": null,
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/users")]
|
#[get("/organizations/<org_id>/users")]
|
||||||
fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json<Value> {
|
||||||
let users = UserOrganization::find_by_org(&org_id, &conn);
|
let users = UserOrganization::find_by_org(&org_id, &conn);
|
||||||
let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();
|
let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": users_json,
|
"Data": users_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null,
|
"ContinuationToken": null,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/organizations/<org_id>/keys", data = "<data>")]
|
||||||
|
fn post_org_keys(org_id: String, data: JsonUpcase<OrgKeyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
||||||
|
let data: OrgKeyData = data.into_inner().data;
|
||||||
|
|
||||||
|
let mut org = match Organization::find_by_uuid(&org_id, &conn) {
|
||||||
|
Some(organization) => {
|
||||||
|
if organization.private_key.is_some() && organization.public_key.is_some() {
|
||||||
|
err!("Organization Keys already exist")
|
||||||
|
}
|
||||||
|
organization
|
||||||
|
}
|
||||||
|
None => err!("Can't find organization details"),
|
||||||
|
};
|
||||||
|
|
||||||
|
org.private_key = Some(data.EncryptedPrivateKey);
|
||||||
|
org.public_key = Some(data.PublicKey);
|
||||||
|
|
||||||
|
org.save(&conn)?;
|
||||||
|
|
||||||
|
Ok(Json(json!({
|
||||||
|
"Object": "organizationKeys",
|
||||||
|
"PublicKey": org.public_key,
|
||||||
|
"PrivateKey": org.private_key,
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -487,13 +545,13 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
|
|||||||
} else {
|
} else {
|
||||||
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
||||||
};
|
};
|
||||||
let user = match User::find_by_mail(&email, &conn) {
|
let user = match User::find_by_mail(email, &conn) {
|
||||||
None => {
|
None => {
|
||||||
if !CONFIG.invitations_allowed() {
|
if !CONFIG.invitations_allowed() {
|
||||||
err!(format!("User does not exist: {}", email))
|
err!(format!("User does not exist: {}", email))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !CONFIG.is_email_domain_allowed(&email) {
|
if !CONFIG.is_email_domain_allowed(email) {
|
||||||
err!("Email domain not eligible for invitations")
|
err!("Email domain not eligible for invitations")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -528,9 +586,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
|
|||||||
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
||||||
None => err!("Collection not found in Organization"),
|
None => err!("Collection not found in Organization"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
CollectionUser::save(&user.uuid, &collection.uuid,
|
CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, col.HidePasswords, &conn)?;
|
||||||
col.ReadOnly, col.HidePasswords,
|
|
||||||
&conn)?;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -545,7 +601,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
        };

        mail::send_invite(
-           &email,
+           email,
            &user.uuid,
            Some(org_id.clone()),
            Some(new_user.uuid),
@@ -615,7 +671,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    // The web-vault passes org_id and org_user_id in the URL, but we are just reading them from the JWT instead
    let data: AcceptData = data.into_inner().data;
    let token = &data.Token;
-   let claims = decode_invite(&token)?;
+   let claims = decode_invite(token)?;

    match User::find_by_mail(&claims.email, &conn) {
        Some(_) => {
@@ -631,6 +687,19 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
                err!("User already accepted the invitation")
            }

            let user_twofactor_disabled = TwoFactor::find_by_user(&user_org.user_uuid, &conn).is_empty();

            let policy = OrgPolicyType::TwoFactorAuthentication as i32;
            let org_twofactor_policy_enabled =
                match OrgPolicy::find_by_org_and_type(&user_org.org_uuid, policy, &conn) {
                    Some(p) => p.enabled,
                    None => false,
                };

            if org_twofactor_policy_enabled && user_twofactor_disabled {
                err!("You cannot join this organization until you enable two-step login on your user account.")
            }

            user_org.status = UserOrgStatus::Accepted as i32;
            user_org.save(&conn)?;
        }
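The same two lookups turn up again in the put_policy hunk further down. As a rough sketch only (the helper name is hypothetical and not part of this changeset), the check could be shared like this, using nothing beyond the TwoFactor and OrgPolicy calls already visible above:

    // Hypothetical helper, not in this diff: true when the org enforces the
    // TwoFactorAuthentication policy and the user has no 2FA method configured.
    fn two_factor_blocks_join(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> bool {
        let user_twofactor_disabled = TwoFactor::find_by_user(user_uuid, conn).is_empty();
        let policy = OrgPolicyType::TwoFactorAuthentication as i32;
        let org_policy_enabled = match OrgPolicy::find_by_org_and_type(org_uuid, policy, conn) {
            Some(p) => p.enabled,
            None => false,
        };
        org_policy_enabled && user_twofactor_disabled
    }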
@@ -639,9 +708,9 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    }

    if CONFIG.mail_enabled() {
-       let mut org_name = String::from("bitwarden_rs");
+       let mut org_name = CONFIG.invitation_org_name();
        if let Some(org_id) = &claims.org_id {
-           org_name = match Organization::find_by_uuid(&org_id, &conn) {
+           org_name = match Organization::find_by_uuid(org_id, &conn) {
                Some(org) => org.name,
                None => err!("Organization not found."),
            };
@@ -785,9 +854,13 @@ fn edit_user(
                match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                    None => err!("Collection not found in Organization"),
                    Some(collection) => {
-                       CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
-                           col.ReadOnly, col.HidePasswords,
-                           &conn)?;
+                       CollectionUser::save(
+                           &user_to_edit.user_uuid,
+                           &collection.uuid,
+                           col.ReadOnly,
+                           col.HidePasswords,
+                           &conn,
+                       )?;
                    }
                }
            }
@@ -883,16 +956,8 @@ fn post_org_import(
        .into_iter()
        .map(|cipher_data| {
            let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-           update_cipher_from_data(
-               &mut cipher,
-               cipher_data,
-               &headers,
-               false,
-               &conn,
-               &nt,
-               UpdateType::CipherCreate,
-           )
-           .ok();
+           update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherCreate)
+               .ok();
            cipher
        })
        .collect();
@@ -914,15 +979,15 @@ fn post_org_import(
}

#[get("/organizations/<org_id>/policies")]
-fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
    let policies = OrgPolicy::find_by_org(&org_id, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

-   Ok(Json(json!({
+   Json(json!({
        "Data": policies_json,
        "Object": "list",
        "ContinuationToken": null
-   })))
+   }))
}

#[get("/organizations/<org_id>/policies/token?<token>")]
@@ -953,7 +1018,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
        Some(pt) => pt,
-       None => err!("Invalid policy type"),
+       None => err!("Invalid or unsupported policy type"),
    };

    let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
@@ -973,7 +1038,13 @@ struct PolicyData {
}

#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
-fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+fn put_policy(
+    org_id: String,
+    pol_type: i32,
+    data: Json<PolicyData>,
+    _headers: AdminHeaders,
+    conn: DbConn,
+) -> JsonResult {
    let data: PolicyData = data.into_inner();

    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
@@ -981,6 +1052,24 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A
        None => err!("Invalid policy type"),
    };

    if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
        let org_list = UserOrganization::find_by_org(&org_id, &conn);

        for user_org in org_list.into_iter() {
            let user_twofactor_disabled = TwoFactor::find_by_user(&user_org.user_uuid, &conn).is_empty();

            if user_twofactor_disabled && user_org.atype < UserOrgType::Admin {
                if CONFIG.mail_enabled() {
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
                    let user = User::find_by_uuid(&user_org.user_uuid, &conn).unwrap();

                    mail::send_2fa_removed_from_org(&user.email, &org.name)?;
                }
                user_org.delete(&conn)?;
            }
        }
    }

    let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
        Some(p) => p,
        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
@@ -993,9 +1082,16 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A
    Ok(Json(policy.to_json()))
}

#[allow(unused_variables)]
#[get("/organizations/<org_id>/tax")]
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
    // Prevent a 404 error, which also causes Javascript errors.
    err!("Only allowed when not self hosted.")
}

#[get("/plans")]
-fn get_plans(_headers: Headers, _conn: DbConn) -> JsonResult {
+fn get_plans(_headers: Headers, _conn: DbConn) -> Json<Value> {
-   Ok(Json(json!({
+   Json(json!({
        "Object": "list",
        "Data": [
            {
@@ -1042,5 +1138,111 @@ fn get_plans(_headers: Headers, _conn: DbConn) -> JsonResult {
            }
        ],
        "ContinuationToken": null
-   })))
+   }))
}

#[get("/plans/sales-tax-rates")]
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> Json<Value> {
    // Prevent a 404 error, which also causes Javascript errors.
    Json(json!({
        "Object": "list",
        "Data": [],
        "ContinuationToken": null
    }))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportGroupData {
    Name: String,       // "GroupName"
    ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
    Users: Vec<String>, // ["uid=user,ou=People,dc=example,dc=com"]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportUserData {
    Email: String,      // "user@maildomain.net"
    ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
    Deleted: bool,
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportData {
    Groups: Vec<OrgImportGroupData>,
    OverwriteExisting: bool,
    Users: Vec<OrgImportUserData>,
}

#[post("/organizations/<org_id>/import", data = "<data>")]
fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
    let data = data.into_inner().data;

    // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way
    // to differentiate between auto-imported users and manually added ones.
    // This means that this endpoint can end up removing users that were added manually by an admin,
    // as opposed to upstream which only removes auto-imported users.

    // User needs to be admin or owner to use the Directory Connector
    match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
        Some(_) => err!("User has insufficient permissions to use Directory Connector"),
        None => err!("User not part of organization"),
    };

    for user_data in &data.Users {
        if user_data.Deleted {
            // If user is marked for deletion and it exists, delete it
            if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) {
                user_org.delete(&conn)?;
            }

        // If user is not part of the organization, but it exists
        } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
            if let Some(user) = User::find_by_mail(&user_data.Email, &conn) {
                let user_org_status = if CONFIG.mail_enabled() {
                    UserOrgStatus::Invited as i32
                } else {
                    UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
                };

                let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
                new_org_user.access_all = false;
                new_org_user.atype = UserOrgType::User as i32;
                new_org_user.status = user_org_status;

                new_org_user.save(&conn)?;

                if CONFIG.mail_enabled() {
                    let org_name = match Organization::find_by_uuid(&org_id, &conn) {
                        Some(org) => org.name,
                        None => err!("Error looking up organization"),
                    };

                    mail::send_invite(
                        &user_data.Email,
                        &user.uuid,
                        Some(org_id.clone()),
                        Some(new_org_user.uuid),
                        &org_name,
                        Some(headers.user.email.clone()),
                    )?;
                }
            }
        }
    }

    // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
    if data.OverwriteExisting {
        for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
            if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
                if !data.Users.iter().any(|u| u.Email == user_email) {
                    user_org.delete(&conn)?;
                }
            }
        }
    }

    Ok(())
}
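For reference, a request body for the new POST /api/organizations/<org_id>/import endpoint mirrors the OrgImportData struct above. A minimal sketch as a serde_json::json! literal; the concrete values are placeholders taken from the comments in the struct definitions, not from a real directory:

    // Hypothetical example payload matching OrgImportData / OrgImportUserData above.
    let body = serde_json::json!({
        "Groups": [],
        "OverwriteExisting": false,
        "Users": [{
            "Email": "user@maildomain.net",
            "ExternalId": "uid=user,ou=People,dc=example,dc=com",
            "Deleted": false
        }]
    });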
src/api/core/sends.rs (new file, 440 lines)
@@ -0,0 +1,440 @@
use std::{io::Read, path::Path};

use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, response::NamedFile, Data};
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{Headers, Host},
    db::{models::*, DbConn, DbPool},
    util::SafeString,
    CONFIG,
};

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

pub fn routes() -> Vec<rocket::Route> {
    routes![
        post_send,
        post_send_file,
        post_access,
        post_access_file,
        put_send,
        delete_send,
        put_remove_password,
        download_send
    ]
}

pub fn purge_sends(pool: DbPool) {
    debug!("Purging sends");
    if let Ok(conn) = pool.get() {
        Send::purge(&conn);
    } else {
        error!("Failed to get DB connection while purging sends")
    }
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
    pub Type: i32,
    pub Key: String,
    pub Password: Option<String>,
    pub MaxAccessCount: Option<i32>,
    pub ExpirationDate: Option<DateTime<Utc>>,
    pub DeletionDate: DateTime<Utc>,
    pub Disabled: bool,
    pub HideEmail: Option<bool>,

    // Data field
    pub Name: String,
    pub Notes: Option<String>,
    pub Text: Option<Value>,
    pub File: Option<Value>,
}

/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
/// an org with this policy enabled isn't allowed to create new Sends or
/// modify existing ones, but is allowed to delete them.
///
/// Ref: https://bitwarden.com/help/article/policies/#disable-send
///
/// There is also a Vaultwarden-specific `sends_allowed` config setting that
/// controls this policy globally.
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let policy_type = OrgPolicyType::DisableSend;
    if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }
    Ok(())
}

/// Enforces the `DisableHideEmail` option of the `Send Options` policy.
/// A non-owner/admin user belonging to an org with this option enabled isn't
/// allowed to hide their email address from the recipient of a Bitwarden Send,
/// but is allowed to remove this option from an existing Send.
///
/// Ref: https://bitwarden.com/help/article/policies/#send-options
fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let hide_email = data.HideEmail.unwrap_or(false);
    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn) {
        err!(
            "Due to an Enterprise Policy, you are not allowed to hide your email address \
            from recipients when creating or editing a Send."
        )
    }
    Ok(())
}

fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let data_val = if data.Type == SendType::Text as i32 {
        data.Text
    } else if data.Type == SendType::File as i32 {
        data.File
    } else {
        err!("Invalid Send type")
    };

    let data_str = if let Some(mut d) = data_val {
        d.as_object_mut().and_then(|o| o.remove("Response"));
        serde_json::to_string(&d)?
    } else {
        err!("Send data not provided");
    };

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }

    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
    send.user_uuid = Some(user_uuid);
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.hide_email = data.HideEmail;
    send.atype = data.Type;

    send.set_password(data.Password.as_deref());

    Ok(send)
}

#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
    }

    let mut send = create_send(data, headers.user.uuid.clone())?;
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}

#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let boundary = content_type.params().next().expect("No boundary provided").1;

    let mut mpart = Multipart::with_body(data.open(), boundary);

    // First entry is the SendData JSON
    let mut model_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "model" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
    enforce_disable_hide_email_policy(&data.data, &headers, &conn)?;

    // Get the file length and add an extra 5% to avoid issues
    const SIZE_525_MB: u64 = 550_502_400;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            if left <= 0 {
                err!("Attachment storage limit reached! Delete some attachments to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
        }
        None => SIZE_525_MB,
    };

    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid.clone())?;
    let file_id = crate::crypto::generate_send_id();

    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);

    // Read the data entry and save the file
    let mut data_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "data" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment is not a file: {:?}", other));
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment storage limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Error: {:?}", e));
        }
    };

    // Set ID and sizes
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
    }
    send.data = serde_json::to_string(&data_value)?;

    // Save the changes in the database
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
    pub Password: Option<String>,
}

#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404);
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    // Files are incremented during the download
    if send.atype == SendType::Text as i32 {
        send.access_count += 1;
    }

    send.save(&conn)?;

    Ok(Json(send.to_json_access(&conn)))
}

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
    send_id: String,
    file_id: String,
    data: JsonUpcase<SendAccessData>,
    host: Host,
    conn: DbConn,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    send.access_count += 1;

    send.save(&conn)?;

    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
    let token = crate::auth::encode_jwt(&token_claims);
    Ok(Json(json!({
        "Object": "send-fileDownload",
        "Id": file_id,
        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
    })))
}

#[get("/sends/<send_id>/<file_id>?<t>")]
fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
    if let Ok(claims) = crate::auth::decode_send(&t) {
        if claims.sub == format!("{}/{}", send_id, file_id) {
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok();
        }
    }
    None
}

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    if send.atype != data.Type {
        err!("Sends can't change type")
    }

    // When updating a file Send, we receive nulls in the File field, as it's immutable,
    // so we only need to update the data field in the Text case
    if data.Type == SendType::Text as i32 {
        let data_str = if let Some(mut d) = data.Text {
            d.as_object_mut().and_then(|d| d.remove("Response"));
            serde_json::to_string(&d)?
        } else {
            err!("Send data not provided");
        };
        send.data = data_str;
    }

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }
    send.name = data.Name;
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.hide_email = data.HideEmail;
    send.disabled = data.Disabled;

    // Only change the value if it's present
    if let Some(password) = data.Password {
        send.set_password(Some(&password));
    }

    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.delete(&conn)?;
    nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);

    Ok(())
}

#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.set_password(None);
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}
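A minimal sketch of a POST /api/sends body for a text Send, following the SendData struct above. The Key, Name and Text values are produced client-side (encrypted blobs in the real client) and the literals here are placeholders only; note that create_send stores the Text object as-is, so its inner fields are whatever the client sends, and the assumption that Type 0 means a text Send follows the SendType::Text comparison used in create_send:

    // Hypothetical example body; field names follow the SendData struct above.
    let body = serde_json::json!({
        "Type": 0,                          // compared against SendType::Text in create_send
        "Key": "<client-generated key>",
        "Name": "<encrypted name>",
        "Notes": null,
        "Text": { "Text": "<encrypted text>" },
        "File": null,
        "Password": null,
        "MaxAccessCount": null,
        "ExpirationDate": null,
        "DeletionDate": "2021-08-01T00:00:00Z",
        "Disabled": false,
        "HideEmail": false
    });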
@@ -17,11 +17,7 @@ use crate::{
pub use crate::config::CONFIG;

pub fn routes() -> Vec<Route> {
-   routes![
-       generate_authenticator,
-       activate_authenticator,
-       activate_authenticator_put,
-   ]
+   routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
}

#[post("/two-factor/get-authenticator", data = "<data>")]
@@ -118,7 +114,7 @@ pub fn validate_totp_code_str(
        _ => err!("TOTP code is not a number"),
    };

-   validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
+   validate_totp_code(user_uuid, totp_code, secret, ip, conn)
}

pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
@@ -129,7 +125,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
        Err(_) => err!("Invalid TOTP secret"),
    };

-   let mut twofactor = match TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Authenticator as i32, &conn) {
+   let mut twofactor = match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn) {
        Some(tf) => tf,
        _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
    };
@@ -141,7 +137,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
    // The amount of steps back and forward in time
    // Also check if we need to disable time drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
-   let steps: i64 = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };
+   let steps = !CONFIG.authenticator_disable_time_drift() as i64;

    for step in -steps..=steps {
        let time_step = current_timestamp / 30i64 + step;
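The rewritten steps line leans on Rust's bool-to-integer cast; a quick sanity check that it matches the old if/else:

    // `!disable as i64` is 1 when drift is allowed and 0 when it is disabled,
    // the same values the previous `if ... { 0 } else { 1 }` produced.
    assert_eq!(!false as i64, 1);
    assert_eq!(!true as i64, 0);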
@@ -160,25 +156,14 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
            // Save the last used time step so only totp time steps higher than this one are allowed.
            // This will also save a newly created twofactor if the code is correct.
            twofactor.last_used = time_step as i32;
-           twofactor.save(&conn)?;
+           twofactor.save(conn)?;
            return Ok(());
        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
-           warn!(
-               "This or a TOTP code within {} steps back and forward has already been used!",
-               steps
-           );
-           err!(format!(
-               "Invalid TOTP code! Server time: {} IP: {}",
-               current_time.format("%F %T UTC"),
-               ip.ip
-           ));
+           warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
+           err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
        }
    }

    // Else no valid code received, deny access
-   err!(format!(
-       "Invalid TOTP code! Server time: {} IP: {}",
-       current_time.format("%F %T UTC"),
-       ip.ip
-   ));
+   err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
}
@@ -12,6 +12,7 @@ use crate::{
        DbConn,
    },
    error::MapResult,
    util::get_reqwest_client,
    CONFIG,
};

@@ -59,7 +60,11 @@ impl DuoData {
        ik.replace_range(digits.., replaced);
        sk.replace_range(digits.., replaced);

-       Self { host, ik, sk }
+       Self {
+           host,
+           ik,
+           sk,
+       }
    }
}
@@ -185,9 +190,7 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
}

fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
-   const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";
-   use reqwest::{blocking::Client, header::*, Method};
+   use reqwest::{header, Method};
    use std::str::FromStr;

    // https://duo.com/docs/authapi#api-details
@@ -199,11 +202,13 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em

    let m = Method::from_str(method).unwrap_or_default();

-   Client::new()
+   let client = get_reqwest_client();
+
+   client
        .request(m, &url)
        .basic_auth(username, Some(password))
-       .header(USER_AGENT, AGENT)
+       .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
-       .header(DATE, date)
+       .header(header::DATE, date)
        .send()?
        .error_for_status()?;

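The get_reqwest_client() helper referenced here lives in src/util.rs and is not shown in this excerpt. A minimal sketch of what such a shared-client helper could look like, assuming once_cell (already used elsewhere in this codebase for lazy statics); the real implementation may differ:

    // Hypothetical sketch only; the actual helper in src/util.rs is not part of this diff.
    use once_cell::sync::Lazy;
    use reqwest::blocking::Client;

    static REQWEST_CLIENT: Lazy<Client> = Lazy::new(Client::new);

    pub fn get_reqwest_client() -> &'static Client {
        &REQWEST_CLIENT
    }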
@@ -221,7 +226,7 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
    let type_ = TwoFactorType::Duo as i32;

    // If the user doesn't have an entry, disabled
-   let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, &conn) {
+   let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn) {
        Some(t) => t,
        None => return DuoStatus::Disabled(DuoData::global().is_some()),
    };
@@ -242,8 +247,8 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {

// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
-   let data = User::find_by_mail(email, &conn)
-       .and_then(|u| get_user_duo_data(&u.uuid, &conn).data())
+   let data = User::find_by_mail(email, conn)
+       .and_then(|u| get_user_duo_data(&u.uuid, conn).data())
        .or_else(DuoData::global)
        .map_res("Can't fetch Duo keys")?;

@@ -338,7 +343,7 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
        err!("Invalid ikey")
    }

-   let expire = match expire.parse() {
+   let expire: i64 = match expire.parse() {
        Ok(e) => e,
        Err(_) => err!("Invalid expire time"),
    };
@@ -56,14 +56,14 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
/// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    let type_ = TwoFactorType::Email as i32;
-   let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn).map_res("Two factor not found")?;
+   let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?;

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;

    let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
    twofactor_data.set_token(generated_token);
    twofactor.data = twofactor_data.to_json();
-   twofactor.save(&conn)?;
+   twofactor.save(conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

@@ -125,11 +125,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
    let twofactor_data = EmailTokenData::new(data.Email, generated_token);

    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
-   let twofactor = TwoFactor::new(
-       user.uuid,
-       TwoFactorType::EmailVerificationChallenge,
-       twofactor_data.to_json(),
-   );
+   let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
@@ -185,8 +181,9 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes

/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
-   let mut email_data = EmailTokenData::from_json(&data)?;
+   let mut email_data = EmailTokenData::from_json(data)?;
-   let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
+   let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
+       .map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),
@@ -198,14 +195,14 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
            email_data.reset_token();
        }
        twofactor.data = email_data.to_json();
-       twofactor.save(&conn)?;
+       twofactor.save(conn)?;

        err!("Token is invalid")
    }

    email_data.reset_token();
    twofactor.data = email_data.to_json();
-   twofactor.save(&conn)?;
+   twofactor.save(conn)?;

    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
    let max_time = CONFIG.email_expiration_time() as i64;
@@ -258,7 +255,7 @@ impl EmailTokenData {
    }

    pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
-       let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(&string);
+       let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string);
        match res {
            Ok(x) => Ok(x),
            Err(_) => err!("Could not decode EmailTokenData from string"),
@@ -295,7 +292,7 @@ mod tests {
    fn test_obscure_email_long() {
        let email = "bytes@example.ext";

-       let result = obscure_email(&email);
+       let result = obscure_email(email);

        // Only first two characters should be visible.
        assert_eq!(result, "by***@example.ext");
@@ -305,7 +302,7 @@ mod tests {
    fn test_obscure_email_short() {
        let email = "byt@example.ext";

-       let result = obscure_email(&email);
+       let result = obscure_email(email);

        // If it's smaller than 3 characters it should only show asterisks.
        assert_eq!(result, "***@example.ext");

@@ -7,46 +7,40 @@ use crate::{
    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::Headers,
    crypto,
-   db::{
-       models::{TwoFactor, User},
-       DbConn,
-   },
+   db::{models::*, DbConn},
+   mail, CONFIG,
};

pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod webauthn;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
-   let mut routes = routes![
-       get_twofactor,
-       get_recover,
-       recover,
-       disable_twofactor,
-       disable_twofactor_put,
-   ];
+   let mut routes = routes![get_twofactor, get_recover, recover, disable_twofactor, disable_twofactor_put,];

    routes.append(&mut authenticator::routes());
    routes.append(&mut duo::routes());
    routes.append(&mut email::routes());
    routes.append(&mut u2f::routes());
    routes.append(&mut webauthn::routes());
    routes.append(&mut yubikey::routes());

    routes
}

#[get("/two-factor")]
-fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
+fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

-   Ok(Json(json!({
+   Json(json!({
        "Data": twofactors_json,
        "Object": "list",
        "ContinuationToken": null,
-   })))
+   }))
}

#[post("/two-factor/get-recover", data = "<data>")]
@@ -134,6 +128,23 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c
        twofactor.delete(&conn)?;
    }

    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).is_empty();

    if twofactor_disabled {
        let policy_type = OrgPolicyType::TwoFactorAuthentication;
        let org_list = UserOrganization::find_by_user_and_policy(&user.uuid, policy_type, &conn);

        for user_org in org_list.into_iter() {
            if user_org.atype < UserOrgType::Admin {
                if CONFIG.mail_enabled() {
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
                    mail::send_2fa_removed_from_org(&user.email, &org.name)?;
                }
                user_org.delete(&conn)?;
            }
        }
    }

    Ok(Json(json!({
        "Enabled": false,
        "Type": type_,

@@ -28,13 +28,7 @@ static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.dom
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));

pub fn routes() -> Vec<Route> {
-   routes![
-       generate_u2f,
-       generate_u2f_challenge,
-       activate_u2f,
-       activate_u2f_put,
-       delete_u2f,
-   ]
+   routes![generate_u2f, generate_u2f_challenge, activate_u2f, activate_u2f_put, delete_u2f,]
}

#[post("/two-factor/get-u2f", data = "<data>")]
@@ -100,13 +94,14 @@ struct RegistrationDef {
}

#[derive(Serialize, Deserialize)]
-struct U2FRegistration {
+pub struct U2FRegistration {
-   id: i32,
+   pub id: i32,
-   name: String,
+   pub name: String,
    #[serde(with = "RegistrationDef")]
-   reg: Registration,
+   pub reg: Registration,
-   counter: u32,
+   pub counter: u32,
    compromised: bool,
    pub migrated: Option<bool>,
}

impl U2FRegistration {
@@ -131,12 +126,12 @@ struct RegisterResponseCopy {
    pub error_code: Option<NumberOrString>,
}

-impl Into<RegisterResponse> for RegisterResponseCopy {
+impl From<RegisterResponseCopy> for RegisterResponse {
-   fn into(self) -> RegisterResponse {
+   fn from(r: RegisterResponseCopy) -> RegisterResponse {
        RegisterResponse {
-           registration_data: self.registration_data,
+           registration_data: r.registration_data,
-           version: self.version,
+           version: r.version,
-           client_data: self.client_data,
+           client_data: r.client_data,
        }
    }
}
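Replacing the hand-written Into impl with From keeps existing call sites compiling, because the standard library's blanket impl derives Into from From:

    // Both forms are available once `From<RegisterResponseCopy>` is implemented:
    fn convert(copy: RegisterResponseCopy) -> RegisterResponse {
        // RegisterResponse::from(copy) and copy.into() are equivalent here.
        copy.into()
    }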
@@ -161,10 +156,7 @@ fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn)

    let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;

-   let error_code = response
-       .error_code
-       .clone()
-       .map_or("0".into(), NumberOrString::into_string);
+   let error_code = response.error_code.clone().map_or("0".into(), NumberOrString::into_string);

    if error_code != "0" {
        err!("Error registering U2F token")
@@ -177,6 +169,7 @@ fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn)
        reg: registration,
        compromised: false,
        counter: 0,
        migrated: None,
    };

    let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;
@@ -255,7 +248,7 @@ fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -
}

fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
-   TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(&conn)
+   TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn)
}

fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
@@ -282,10 +275,11 @@ fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2
                reg: old_regs.remove(0),
                compromised: false,
                counter: 0,
                migrated: None,
            }];

            // Save new format
-           save_u2f_registrations(user_uuid, &new_regs, &conn)?;
+           save_u2f_registrations(user_uuid, &new_regs, conn)?;

            new_regs
        }
@@ -300,20 +294,13 @@ fn _old_parse_registrations(registations: &str) -> Vec<Registration> {

    let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");

-   regs.into_iter()
-       .map(|r| serde_json::from_value(r).unwrap())
-       .map(|Helper(r)| r)
-       .collect()
+   regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect()
}

pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);

-   let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?
-       .1
-       .into_iter()
-       .map(|r| r.reg)
-       .collect();
+   let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect();

    if registrations.is_empty() {
        err!("No U2F devices registered")
@@ -324,12 +311,12 @@ pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRe

pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
    let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
-   let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
+   let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn);

    let challenge = match tf_challenge {
        Some(tf_challenge) => {
            let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
-           tf_challenge.delete(&conn)?;
+           tf_challenge.delete(conn)?;
            challenge
        }
        None => err!("Can't recover login challenge"),
@@ -345,13 +332,13 @@ pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> Emp
|
|||||||
match response {
|
match response {
|
||||||
Ok(new_counter) => {
|
Ok(new_counter) => {
|
||||||
reg.counter = new_counter;
|
reg.counter = new_counter;
|
||||||
save_u2f_registrations(user_uuid, ®istrations, &conn)?;
|
save_u2f_registrations(user_uuid, ®istrations, conn)?;
|
||||||
|
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
Err(u2f::u2ferror::U2fError::CounterTooLow) => {
|
Err(u2f::u2ferror::U2fError::CounterTooLow) => {
|
||||||
reg.compromised = true;
|
reg.compromised = true;
|
||||||
save_u2f_registrations(user_uuid, ®istrations, &conn)?;
|
save_u2f_registrations(user_uuid, ®istrations, conn)?;
|
||||||
|
|
||||||
err!("This device might be compromised!");
|
err!("This device might be compromised!");
|
||||||
}
|
}
|
||||||
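The `_old_parse_registrations` helper above deserializes each JSON value through a wrapper type and immediately destructures it with `.map(|Helper(r)| r)`. `Helper` itself is defined elsewhere in u2f.rs and is not part of this diff, so the following is only a rough sketch of that newtype-deserialize pattern, with an assumed `u32` payload standing in for the real registration type.

use serde::Deserialize;
use serde_json::Value;

// Hypothetical stand-in for the crate's `Helper` wrapper; the real payload is a U2F Registration.
#[derive(Deserialize)]
#[serde(transparent)]
struct Helper(u32);

fn parse_all(raw: &str) -> Vec<u32> {
    let values: Vec<Value> = serde_json::from_str(raw).expect("Can't parse data");
    // Deserialize into the wrapper, then peel it off by destructuring in the closure,
    // mirroring the `.map(|Helper(r)| r)` chain in the hunk above.
    values.into_iter().map(|v| serde_json::from_value::<Helper>(v).unwrap()).map(|Helper(r)| r).collect()
}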
src/api/core/two_factor/webauthn.rs  (new file, 386 lines)
@@ -0,0 +1,386 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn};

use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
    },
    auth::Headers,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::Error,
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![get_webauthn, generate_webauthn_challenge, activate_webauthn, activate_webauthn_put, delete_webauthn,]
}

struct WebauthnConfig {
    url: String,
    rpid: String,
}

impl WebauthnConfig {
    fn load() -> Webauthn<Self> {
        let domain = CONFIG.domain();
        Webauthn::new(Self {
            rpid: reqwest::Url::parse(&domain)
                .map(|u| u.domain().map(str::to_owned))
                .ok()
                .flatten()
                .unwrap_or_default(),
            url: domain,
        })
    }
}

impl webauthn_rs::WebauthnConfig for WebauthnConfig {
    fn get_relying_party_name(&self) -> &str {
        &self.url
    }

    fn get_origin(&self) -> &str {
        &self.url
    }

    fn get_relying_party_id(&self) -> &str {
        &self.rpid
    }

    /// We have WebAuthn configured to discourage user verification
    /// if we leave this enabled, it will cause verification issues when a keys send UV=1.
    /// Upstream (the library they use) ignores this when set to discouraged, so we should too.
    fn get_require_uv_consistency(&self) -> bool {
        false
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct WebauthnRegistration {
    pub id: i32,
    pub name: String,
    pub migrated: bool,

    pub credential: Credential,
}

impl WebauthnRegistration {
    fn to_json(&self) -> Value {
        json!({
            "Id": self.id,
            "Name": self.name,
            "migrated": self.migrated,
        })
    }
}

#[post("/two-factor/get-webauthn", data = "<data>")]
fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    if !CONFIG.domain_set() {
        err!("`DOMAIN` environment variable is not set. Webauthn disabled")
    }

    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn)?;
    let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

    Ok(Json(json!({
        "Enabled": enabled,
        "Keys": registrations_json,
        "Object": "twoFactorWebAuthn"
    })))
}

#[post("/two-factor/get-webauthn-challenge", data = "<data>")]
fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)?
        .1
        .into_iter()
        .map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering
        .collect();

    let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
        headers.user.uuid.as_bytes().to_vec(),
        headers.user.email,
        headers.user.name,
        Some(registrations),
        None,
        None,
    )?;

    let type_ = TwoFactorType::WebauthnRegisterChallenge;
    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn)?;

    let mut challenge_value = serde_json::to_value(challenge.public_key)?;
    challenge_value["status"] = "ok".into();
    challenge_value["errorMessage"] = "".into();
    Ok(Json(challenge_value))
}

#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
struct EnableWebauthnData {
    Id: NumberOrString, // 1..5
    Name: String,
    MasterPasswordHash: String,
    DeviceResponse: RegisterPublicKeyCredentialCopy,
}

// This is copied from RegisterPublicKeyCredential to change the Response objects casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
struct RegisterPublicKeyCredentialCopy {
    pub Id: String,
    pub RawId: Base64UrlSafeData,
    pub Response: AuthenticatorAttestationResponseRawCopy,
    pub Type: String,
}

// This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticatorAttestationResponseRawCopy {
    pub AttestationObject: Base64UrlSafeData,
    pub ClientDataJson: Base64UrlSafeData,
}

impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
    fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
        Self {
            id: r.Id,
            raw_id: r.RawId,
            response: AuthenticatorAttestationResponseRaw {
                attestation_object: r.Response.AttestationObject,
                client_data_json: r.Response.ClientDataJson,
            },
            type_: r.Type,
        }
    }
}

// This is copied from PublicKeyCredential to change the Response objects casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct PublicKeyCredentialCopy {
    pub Id: String,
    pub RawId: Base64UrlSafeData,
    pub Response: AuthenticatorAssertionResponseRawCopy,
    pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
    pub Type: String,
}

// This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticatorAssertionResponseRawCopy {
    pub AuthenticatorData: Base64UrlSafeData,
    pub ClientDataJson: Base64UrlSafeData,
    pub Signature: Base64UrlSafeData,
    pub UserHandle: Option<Base64UrlSafeData>,
}

#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticationExtensionsClientOutputsCopy {
    #[serde(default)]
    pub Appid: bool,
}

impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
    fn from(r: PublicKeyCredentialCopy) -> Self {
        Self {
            id: r.Id,
            raw_id: r.RawId,
            response: AuthenticatorAssertionResponseRaw {
                authenticator_data: r.Response.AuthenticatorData,
                client_data_json: r.Response.ClientDataJson,
                signature: r.Response.Signature,
                user_handle: r.Response.UserHandle,
            },
            extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
                appid: e.Appid,
            }),
            type_: r.Type,
        }
    }
}

#[post("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: EnableWebauthnData = data.into_inner().data;
    let mut user = headers.user;

    if !user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    // Retrieve and delete the saved challenge state
    let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
    let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        Some(tf) => {
            let state: RegistrationState = serde_json::from_str(&tf.data)?;
            tf.delete(&conn)?;
            state
        }
        None => err!("Can't recover challenge"),
    };

    // Verify the credentials with the saved state
    let (credential, _data) =
        WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;

    let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn)?.1;
    // TODO: Check for repeated ID's
    registrations.push(WebauthnRegistration {
        id: data.Id.into_i32()?,
        name: data.Name,
        migrated: false,

        credential,
    });

    // Save the registrations and return them
    TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?).save(&conn)?;
    _generate_recover_code(&mut user, &conn);

    let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
    Ok(Json(json!({
        "Enabled": true,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

#[put("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_webauthn(data, headers, conn)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteU2FData {
    Id: NumberOrString,
    MasterPasswordHash: String,
}

#[delete("/two-factor/webauthn", data = "<data>")]
fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
    let id = data.data.Id.into_i32()?;
    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn) {
        Some(tf) => tf,
        None => err!("Webauthn data not found!"),
    };

    let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;

    let item_pos = match data.iter().position(|r| r.id == id) {
        Some(p) => p,
        None => err!("Webauthn entry not found"),
    };

    let removed_item = data.remove(item_pos);
    tf.data = serde_json::to_string(&data)?;
    tf.save(&conn)?;
    drop(tf);

    // If entry is migrated from u2f, delete the u2f entry as well
    if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn) {
        use crate::api::core::two_factor::u2f::U2FRegistration;
        let mut data: Vec<U2FRegistration> = match serde_json::from_str(&u2f.data) {
            Ok(d) => d,
            Err(_) => err!("Error parsing U2F data"),
        };

        data.retain(|r| r.reg.key_handle != removed_item.credential.cred_id);
        let new_data_str = serde_json::to_string(&data)?;

        u2f.data = new_data_str;
        u2f.save(&conn)?;
    }

    let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();

    Ok(Json(json!({
        "Enabled": true,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

pub fn get_webauthn_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
    let type_ = TwoFactorType::Webauthn as i32;
    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
        Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
        None => Ok((false, Vec::new())), // If no data, return empty list
    }
}

pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
    // Load saved credentials
    let creds: Vec<Credential> =
        get_webauthn_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.credential).collect();

    if creds.is_empty() {
        err!("No Webauthn devices registered")
    }

    // Generate a challenge based on the credentials
    let ext = RequestAuthenticationExtensions::builder().appid(format!("{}/app-id.json", &CONFIG.domain())).build();
    let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?;

    // Save the challenge state for later validation
    TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
        .save(conn)?;

    // Return challenge to the clients
    Ok(Json(serde_json::to_value(response.public_key)?))
}

pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
    let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
        Some(tf) => {
            let state: AuthenticationState = serde_json::from_str(&tf.data)?;
            tf.delete(conn)?;
            state
        }
        None => err!("Can't recover login challenge"),
    };

    let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
    let rsp: PublicKeyCredential = rsp.data.into();

    let mut registrations = get_webauthn_registrations(user_uuid, conn)?.1;

    // If the credential we received is migrated from U2F, enable the U2F compatibility
    //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
    let (cred_id, auth_data) = WebauthnConfig::load().authenticate_credential(&rsp, &state)?;

    for reg in &mut registrations {
        if &reg.credential.cred_id == cred_id {
            reg.credential.counter = auth_data.counter;

            TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
                .save(conn)?;
            return Ok(());
        }
    }

    err!("Credential not present")
}
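Taken together, the new module wires WebAuthn into the existing two-factor flow in two server-side steps: `generate_webauthn_login` stores an `AuthenticationState` as a `WebauthnLoginChallenge` row and returns the challenge JSON, and `validate_webauthn_login` later loads that state and verifies the client's assertion. A rough sketch of how a login handler could chain the two, assuming the crate's usual `DbConn`, `EmptyResult`, and `err_json!` items are in scope (the `webauthn_two_factor` wrapper itself is illustrative, not part of the diff):

// Illustrative only: shows the intended call order of the two functions added above.
fn webauthn_two_factor(user_uuid: &str, client_response: Option<&str>, conn: &DbConn) -> EmptyResult {
    match client_response {
        // No assertion yet: issue a challenge and send it back to the client.
        None => {
            let challenge = generate_webauthn_login(user_uuid, conn)?;
            err_json!(challenge.0, "WebAuthn challenge issued, assertion required")
        }
        // The client answered: verify it against the stored AuthenticationState,
        // which also bumps the credential counter on success.
        Some(resp) => validate_webauthn_login(user_uuid, resp, conn),
    }
}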
src/api/icons.rs  (511 changes)
@@ -1,49 +1,105 @@
 use std::{
+    collections::HashMap,
     fs::{create_dir_all, remove_file, symlink_metadata, File},
     io::prelude::*,
     net::{IpAddr, ToSocketAddrs},
+    sync::{Arc, RwLock},
     time::{Duration, SystemTime},
 };

 use once_cell::sync::Lazy;
 use regex::Regex;
-use reqwest::{blocking::Client, blocking::Response, header::HeaderMap, Url};
-use rocket::{http::ContentType, http::Cookie, response::Content, Route};
-use soup::prelude::*;
+use reqwest::{blocking::Client, blocking::Response, header};
+use rocket::{http::ContentType, response::Content, Route};

-use crate::{error::Error, util::Cached, CONFIG};
+use crate::{
+    error::Error,
+    util::{get_reqwest_client_builder, Cached},
+    CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     routes![icon]
 }

-const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
-
-const ALLOWED_CHARS: &str = "_-.";
-
 static CLIENT: Lazy<Client> = Lazy::new(|| {
+    // Generate the default headers
+    let mut default_headers = header::HeaderMap::new();
+    default_headers
+        .insert(header::USER_AGENT, header::HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
+    default_headers
+        .insert(header::ACCEPT, header::HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
+    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en,*;q=0.1"));
+    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
+
     // Reuse the client between requests
-    Client::builder()
+    get_reqwest_client_builder()
+        .cookie_provider(Arc::new(Jar::default()))
         .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
-        .default_headers(_header_map())
+        .default_headers(default_headers)
         .build()
-        .unwrap()
+        .expect("Failed to build icon client")
 });

-static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"icon$|apple.*icon").unwrap());
-static ICON_HREF_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64").unwrap());
+// Build Regex only once since this takes a lot of time.
+static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
+static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

+// Special HashMap which holds the user defined Regex to speedup matching the regex.
+static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
+
+#[get("/<domain>/icon.png")]
+fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
+    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
+
+    if !is_valid_domain(&domain) {
+        warn!("Invalid domain: {}", domain);
+        return Cached::ttl(
+            Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
+            CONFIG.icon_cache_negttl(),
+        );
+    }
+
+    match get_icon(&domain) {
+        Some((icon, icon_type)) => {
+            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
+        }
+        _ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
+    }
+}
+
+/// Returns if the domain provided is valid or not.
+///
+/// This does some manual checks and makes use of Url to do some basic checking.
+/// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255.
 fn is_valid_domain(domain: &str) -> bool {
-    // Don't allow empty or too big domains or path traversal
-    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
+    const ALLOWED_CHARS: &str = "_-.";
+
+    // If parsing the domain fails using Url, it will not work with reqwest.
+    if let Err(parse_error) = url::Url::parse(format!("https://{}", domain).as_str()) {
+        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
+        return false;
+    } else if domain.is_empty()
+        || domain.contains("..")
+        || domain.starts_with('.')
+        || domain.starts_with('-')
+        || domain.ends_with('-')
+    {
+        debug!(
+            "Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'",
+            domain
+        );
+        return false;
+    } else if domain.len() > 255 {
+        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
         return false;
     }

-    // Only alphanumeric or specific characters
     for c in domain.chars() {
         if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
+            debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
             return false;
         }
     }
@@ -51,21 +107,10 @@ fn is_valid_domain(domain: &str) -> bool {
     true
 }

-#[get("/<domain>/icon.png")]
-fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
-    let icon_type = ContentType::new("image", "x-icon");
-
-    if !is_valid_domain(&domain) {
-        warn!("Invalid domain: {:#?}", domain);
-        return Cached::long(Content(icon_type, FALLBACK_ICON.to_vec()));
-    }
-
-    Cached::long(Content(icon_type, get_icon(&domain)))
-}
-
 /// TODO: This is extracted from IpAddr::is_global, which is unstable:
 /// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
 /// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
+#[allow(clippy::nonminimal_bool)]
 #[cfg(not(feature = "unstable"))]
 fn is_global(ip: IpAddr) -> bool {
     match ip {
@@ -161,7 +206,7 @@ mod tests {
     }
 }

-fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
+fn is_domain_blacklisted(domain: &str) -> bool {
     let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
         && (domain, 0)
             .to_socket_addrs()
@@ -179,8 +224,32 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
     // Skip the regex check if the previous one is true already
     if !is_blacklisted {
         if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-            let regex = Regex::new(&blacklist).expect("Valid Regex");
-            if regex.is_match(&domain) {
+            let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
+
+            // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
+            let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
+                regex
+            } else {
+                drop(regex_hashmap);
+
+                let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
+                // Clear the current list if the previous key doesn't exists.
+                // To prevent growing of the HashMap after someone has changed it via the admin interface.
+                if regex_hashmap_write.len() >= 1 {
+                    regex_hashmap_write.clear();
+                }
+
+                // Generate the regex to store in too the Lazy Static HashMap.
+                let blacklist_regex = Regex::new(&blacklist).unwrap();
+                regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
+                drop(regex_hashmap_write);
+
+                regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
+                regex_hashmap.get(&blacklist).unwrap()
+            };
+
+            // Use the pre-generate Regex stored in a Lazy HashMap.
+            if regex.is_match(domain) {
                 warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
                 is_blacklisted = true;
             }
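The hunk above swaps a per-request `Regex::new(&blacklist)` for a process-wide cache behind an `RwLock`, keyed on the configured pattern so that changing the setting in the admin interface invalidates the old entry. Reduced to a standalone sketch (names below are illustrative, not the crate's):

use std::{collections::HashMap, sync::RwLock};
use once_cell::sync::Lazy;
use regex::Regex;

static REGEX_CACHE: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));

fn matches_cached(pattern: &str, text: &str) -> bool {
    // Fast path: reuse a previously compiled Regex under the read lock.
    if let Some(re) = REGEX_CACHE.read().unwrap().get(pattern) {
        return re.is_match(text);
    }
    // Slow path: compile once under the write lock; keep only the current pattern
    // so a changed setting does not leave stale entries behind.
    let mut cache = REGEX_CACHE.write().unwrap();
    cache.clear();
    cache.insert(pattern.to_string(), Regex::new(pattern).unwrap());
    cache.get(pattern).unwrap().is_match(text)
}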
@@ -190,39 +259,42 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
     is_blacklisted
 }

-fn get_icon(domain: &str) -> Vec<u8> {
+fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

+    // Check for expiration of negatively cached copy
+    if icon_is_negcached(&path) {
+        return None;
+    }
+
     if let Some(icon) = get_cached_icon(&path) {
-        return icon;
+        let icon_type = match get_icon_type(&icon) {
+            Some(x) => x,
+            _ => "x-icon",
+        };
+        return Some((icon, icon_type.to_string()));
     }

     if CONFIG.disable_icon_download() {
-        return FALLBACK_ICON.to_vec();
+        return None;
     }

-    // Get the icon, or fallback in case of error
-    match download_icon(&domain) {
-        Ok(icon) => {
+    // Get the icon, or None in case of error
+    match download_icon(domain) {
+        Ok((icon, icon_type)) => {
             save_icon(&path, &icon);
-            icon
+            Some((icon, icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
             error!("Error downloading icon: {:?}", e);
             let miss_indicator = path + ".miss";
-            let empty_icon = Vec::new();
-            save_icon(&miss_indicator, &empty_icon);
-            FALLBACK_ICON.to_vec()
+            save_icon(&miss_indicator, &[]);
+            None
         }
     }
 }

 fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
-    // Check for expiration of negatively cached copy
-    if icon_is_negcached(path) {
-        return Some(FALLBACK_ICON.to_vec());
-    }
-
     // Check for expiration of successfully cached copy
     if icon_is_expired(path) {
         return None;
@@ -272,7 +344,6 @@ fn icon_is_expired(path: &str) -> bool {
     expired.unwrap_or(true)
 }

-#[derive(Debug)]
 struct Icon {
     priority: u8,
     href: String,
@@ -280,12 +351,108 @@ struct Icon {

 impl Icon {
     const fn new(priority: u8, href: String) -> Self {
-        Self { href, priority }
+        Self {
+            priority,
+            href,
+        }
     }
 }

-/// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response.
-/// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
+/// Iterates over the HTML document to find <base href="http://domain.tld">
+/// When found it will stop the iteration and the found base href will be shared deref via `base_href`.
+///
+/// # Arguments
+/// * `node` - A Parsed HTML document via html5ever::parse_document()
+/// * `base_href` - a mutable url::Url which will be overwritten when a base href tag has been found.
+///
+fn get_base_href(node: &std::rc::Rc<markup5ever_rcdom::Node>, base_href: &mut url::Url) -> bool {
+    if let markup5ever_rcdom::NodeData::Element {
+        name,
+        attrs,
+        ..
+    } = &node.data
+    {
+        if name.local.as_ref() == "base" {
+            let attrs = attrs.borrow();
+            for attr in attrs.iter() {
+                let attr_name = attr.name.local.as_ref();
+                let attr_value = attr.value.as_ref();
+
+                if attr_name == "href" {
+                    debug!("Found base href: {}", attr_value);
+                    *base_href = match base_href.join(attr_value) {
+                        Ok(href) => href,
+                        _ => base_href.clone(),
+                    };
+                    return true;
+                }
+            }
+            return true;
+        }
+    }
+
+    // TODO: Might want to limit the recursion depth?
+    for child in node.children.borrow().iter() {
+        // Check if we got a true back and stop the iter.
+        // This means we found a <base> tag and can stop processing the html.
+        if get_base_href(child, base_href) {
+            return true;
+        }
+    }
+    false
+}
+
+fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &url::Url) {
+    if let markup5ever_rcdom::NodeData::Element {
+        name,
+        attrs,
+        ..
+    } = &node.data
+    {
+        if name.local.as_ref() == "link" {
+            let mut has_rel = false;
+            let mut href = None;
+            let mut sizes = None;
+
+            let attrs = attrs.borrow();
+            for attr in attrs.iter() {
+                let attr_name = attr.name.local.as_ref();
+                let attr_value = attr.value.as_ref();
+
+                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value)
+                {
+                    has_rel = true;
+                } else if attr_name == "href" {
+                    href = Some(attr_value);
+                } else if attr_name == "sizes" {
+                    sizes = Some(attr_value);
+                }
+            }
+
+            if has_rel {
+                if let Some(inner_href) = href {
+                    if let Ok(full_href) = url.join(inner_href).map(String::from) {
+                        let priority = get_icon_priority(&full_href, sizes);
+                        icons.push(Icon::new(priority, full_href));
+                    }
+                }
+            }
+        }
+    }
+
+    // TODO: Might want to limit the recursion depth?
+    for child in node.children.borrow().iter() {
+        get_favicons_node(child, icons, url);
+    }
+}
+
+struct IconUrlResult {
+    iconlist: Vec<Icon>,
+    referer: String,
+}
+
+/// Returns a IconUrlResult which holds a Vector IconList and a string which holds the referer.
+/// There will always two items within the iconlist which holds http(s)://domain.tld/favicon.ico.
 /// This does not mean that that location does exists, but it is the default location browser use.
 ///
 /// # Argument
@@ -293,65 +460,79 @@ impl Icon {
 ///
 /// # Example
 /// ```
-/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
-/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
+/// let icon_result = get_icon_url("github.com")?;
+/// let icon_result = get_icon_url("vaultwarden.discourse.group")?;
 /// ```
-fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
+fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     // Default URL with secure and insecure schemes
     let ssldomain = format!("https://{}", domain);
     let httpdomain = format!("http://{}", domain);

+    // First check the domain as given during the request for both HTTPS and HTTP.
+    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
+        Ok(c) => Ok(c),
+        Err(e) => {
+            let mut sub_resp = Err(e);
+
+            // When the domain is not an IP, and has more then one dot, remove all subdomains.
+            let is_ip = domain.parse::<IpAddr>();
+            if is_ip.is_err() && domain.matches('.').count() > 1 {
+                let mut domain_parts = domain.split('.');
+                let base_domain = format!(
+                    "{base}.{tld}",
+                    tld = domain_parts.next_back().unwrap(),
+                    base = domain_parts.next_back().unwrap()
+                );
+                if is_valid_domain(&base_domain) {
+                    let sslbase = format!("https://{}", base_domain);
+                    let httpbase = format!("http://{}", base_domain);
+                    debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);
+
+                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
+                }
+
+            // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
+            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
+                let www_domain = format!("www.{}", domain);
+                if is_valid_domain(&www_domain) {
+                    let sslwww = format!("https://{}", www_domain);
+                    let httpwww = format!("http://{}", www_domain);
+                    debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);
+
+                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
+                }
+            }
+
+            sub_resp
+        }
+    };
+
     // Create the iconlist
     let mut iconlist: Vec<Icon> = Vec::new();
+    let mut referer = String::from("");

-    // Create the cookie_str to fill it all the cookies from the response
-    // These cookies can be used to request/download the favicon image.
-    // Some sites have extra security in place with for example XSRF Tokens.
-    let mut cookie_str = String::new();
-
-    let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain));
     if let Ok(content) = resp {
         // Extract the URL from the respose in case redirects occured (like @ gitlab.com)
         let url = content.url().clone();

-        let raw_cookies = content.headers().get_all("set-cookie");
-        cookie_str = raw_cookies
-            .iter()
-            .filter_map(|raw_cookie| raw_cookie.to_str().ok())
-            .map(|cookie_str| {
-                if let Ok(cookie) = Cookie::parse(cookie_str) {
-                    format!("{}={}; ", cookie.name(), cookie.value())
-                } else {
-                    String::new()
-                }
-            })
-            .collect::<String>();
+        // Set the referer to be used on the final request, some sites check this.
+        // Mostly used to prevent direct linking and other security resons.
+        referer = url.as_str().to_string();

         // Add the default favicon.ico to the list with the domain the content responded from.
-        iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
+        iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap())));

-        // 512KB should be more than enough for the HTML, though as we only really need
-        // the HTML header, it could potentially be reduced even further
-        let limited_reader = content.take(512 * 1024);
+        // 384KB should be more than enough for the HTML, though as we only really need the HTML header.
+        let mut limited_reader = content.take(384 * 1024);

-        let soup = Soup::from_reader(limited_reader)?;
-        // Search for and filter
-        let favicons = soup
-            .tag("link")
-            .attr("rel", ICON_REL_REGEX.clone()) // Only use icon rels
-            .attr("href", ICON_HREF_REGEX.clone()) // Only allow specific extensions
-            .find_all();
+        use html5ever::tendril::TendrilSink;
+        let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
+            .from_utf8()
+            .read_from(&mut limited_reader)?;

-        // Loop through all the found icons and determine it's priority
-        for favicon in favicons {
-            let sizes = favicon.get("sizes");
-            let href = favicon.get("href").expect("Missing href");
-            let full_href = url.join(&href).unwrap().into_string();
-
-            let priority = get_icon_priority(&full_href, sizes);
-
-            iconlist.push(Icon::new(priority, full_href))
-        }
+        let mut base_url: url::Url = url;
+        get_base_href(&dom.document, &mut base_url);
+        get_favicons_node(&dom.document, &mut iconlist, &base_url);
     } else {
         // Add the default favicon.ico to the list with just the given domain
         iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
@@ -362,28 +543,27 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
     iconlist.sort_by_key(|x| x.priority);

     // There always is an icon in the list, so no need to check if it exists, and just return the first one
-    Ok((iconlist, cookie_str))
+    Ok(IconUrlResult {
+        iconlist,
+        referer,
+    })
 }

 fn get_page(url: &str) -> Result<Response, Error> {
-    get_page_with_cookies(url, "")
+    get_page_with_referer(url, "")
 }

-fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
-    if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
-        err!("Favicon rel linked to a non blacklisted domain!");
+fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
+    if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) {
+        err!("Favicon rel linked to a blacklisted domain!");
     }

-    if cookie_str.is_empty() {
-        CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
-    } else {
-        CLIENT
-            .get(url)
-            .header("cookie", cookie_str)
-            .send()?
-            .error_for_status()
-            .map_err(Into::into)
+    let mut client = CLIENT.get(url);
+    if !referer.is_empty() {
+        client = client.header("Referer", referer)
     }

+    client.send()?.error_for_status().map_err(Into::into)
 }

 /// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -398,7 +578,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error>
 /// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
 /// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
 /// ```
-fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
+fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
     // Check if there is a dimension set
     let (width, height) = parse_sizes(sizes);

@@ -411,7 +591,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
         1
     } else if width == 64 {
         2
-    } else if width >= 24 && width <= 128 {
+    } else if (24..=192).contains(&width) {
         3
     } else if width == 16 {
         4
@@ -446,7 +626,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
 /// let (width, height) = parse_sizes("x128x128"); // (128, 128)
 /// let (width, height) = parse_sizes("32"); // (0, 0)
 /// ```
-fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
+fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
     let mut width: u16 = 0;
     let mut height: u16 = 0;

@@ -465,18 +645,19 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
     (width, height)
 }

-fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
-    if check_icon_domain_is_blacklisted(domain) {
+fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
+    if is_domain_blacklisted(domain) {
         err!("Domain is blacklisted", domain)
     }

-    let (iconlist, cookie_str) = get_icon_url(&domain)?;
+    let icon_result = get_icon_url(domain)?;

     let mut buffer = Vec::new();
+    let mut icon_type: Option<&str> = None;

     use data_url::DataUrl;

-    for icon in iconlist.iter().take(5) {
+    for icon in icon_result.iconlist.iter().take(5) {
         if icon.href.starts_with("data:image") {
             let datauri = DataUrl::process(&icon.href).unwrap();
             // Check if we are able to decode the data uri
@@ -484,29 +665,43 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
                 Ok((body, _fragment)) => {
                     // Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create
                     if body.len() >= 67 {
+                        // Check if the icon type is allowed, else try an icon from the list.
+                        icon_type = get_icon_type(&body);
+                        if icon_type.is_none() {
+                            debug!("Icon from {} data:image uri, is not a valid image type", domain);
+                            continue;
+                        }
+                        info!("Extracted icon from data:image uri for {}", domain);
                         buffer = body;
                         break;
                     }
                 }
-                _ => warn!("data uri is invalid"),
+                _ => warn!("Extracted icon from data:image uri is invalid"),
             };
         } else {
-            match get_page_with_cookies(&icon.href, &cookie_str) {
+            match get_page_with_referer(&icon.href, &icon_result.referer) {
                 Ok(mut res) => {
-                    info!("Downloaded icon from {}", icon.href);
                     res.copy_to(&mut buffer)?;
+                    // Check if the icon type is allowed, else try an icon from the list.
+                    icon_type = get_icon_type(&buffer);
+                    if icon_type.is_none() {
+                        buffer.clear();
+                        debug!("Icon from {}, is not a valid image type", icon.href);
+                        continue;
+                    }
+                    info!("Downloaded icon from {}", icon.href);
                     break;
                 }
-                Err(_) => info!("Download failed for {}", icon.href),
+                _ => warn!("Download failed for {}", icon.href),
             };
         }
     }

     if buffer.is_empty() {
-        err!("Empty response")
+        err!("Empty response downloading icon")
     }

-    Ok(buffer)
+    Ok((buffer, icon_type))
 }

 fn save_icon(path: &str, icon: &[u8]) {
@@ -518,29 +713,65 @@ fn save_icon(path: &str, icon: &[u8]) {
             create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
         }
         Err(e) => {
-            info!("Icon save error: {:?}", e);
+            warn!("Icon save error: {:?}", e);
         }
     }
 }

-fn _header_map() -> HeaderMap {
-    // Set some default headers for the request.
-    // Use a browser like user-agent to make sure most websites will return there correct website.
-    use reqwest::header::*;
-
-    macro_rules! headers {
-        ($( $name:ident : $value:literal),+ $(,)? ) => {
-            let mut headers = HeaderMap::new();
-            $( headers.insert($name, HeaderValue::from_static($value)); )+
-            headers
-        };
-    }
-
-    headers! {
-        USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
-        ACCEPT_LANGUAGE: "en-US,en;q=0.8",
-        CACHE_CONTROL: "no-cache",
-        PRAGMA: "no-cache",
-        ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
+fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
+    match bytes {
+        [137, 80, 78, 71, ..] => Some("png"),
+        [0, 0, 1, 0, ..] => Some("x-icon"),
+        [82, 73, 70, 70, ..] => Some("webp"),
+        [255, 216, 255, ..] => Some("jpeg"),
+        [71, 73, 70, 56, ..] => Some("gif"),
+        [66, 77, ..] => Some("bmp"),
+        _ => None,
+    }
+}
+
+/// This is an implementation of the default Cookie Jar from Reqwest and reqwest_cookie_store build by pfernie.
+/// The default cookie jar used by Reqwest keeps all the cookies based upon the Max-Age or Expires which could be a long time.
+/// That could be used for tracking, to prevent this we force the lifespan of the cookies to always be max two minutes.
+/// A Cookie Jar is needed because some sites force a redirect with cookies to verify if a request uses cookies or not.
+use cookie_store::CookieStore;
+#[derive(Default)]
+pub struct Jar(RwLock<CookieStore>);
+
+impl reqwest::cookie::CookieStore for Jar {
+    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
+        use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
+        use time::Duration;
+
+        let mut cookie_store = self.0.write().unwrap();
+        let cookies = cookie_headers.filter_map(|val| {
+            std::str::from_utf8(val.as_bytes())
+                .map_err(RawCookieParseError::from)
+                .and_then(RawCookie::parse)
+                .map(|mut c| {
+                    c.set_expires(None);
+                    c.set_max_age(Some(Duration::minutes(2)));
+                    c.into_owned()
+                })
+                .ok()
+        });
+        cookie_store.store_response_cookies(cookies, url);
+    }
+
+    fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
+        use bytes::Bytes;
+
+        let cookie_store = self.0.read().unwrap();
+        let s = cookie_store
+            .get_request_values(url)
+            .map(|(name, value)| format!("{}={}", name, value))
+            .collect::<Vec<_>>()
+            .join("; ");
+
+        if s.is_empty() {
+            return None;
+        }
+
+        header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
     }
 }
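The new `get_icon_type` above identifies the image format from its magic bytes rather than the URL extension; `get_icon` and `download_icon` use it both to set the returned content type and to reject payloads that are not images. A hedged sketch of a test exercising those byte patterns (the byte values are standard file signatures, not taken from this diff):

#[cfg(test)]
mod icon_type_tests {
    use super::get_icon_type;

    #[test]
    fn sniffs_common_formats() {
        // PNG: 0x89 'P' 'N' 'G', ICO: 00 00 01 00, JPEG: FF D8 FF, GIF: "GIF8", BMP: "BM"
        assert_eq!(get_icon_type(&[0x89, b'P', b'N', b'G', 0x0D, 0x0A]), Some("png"));
        assert_eq!(get_icon_type(&[0x00, 0x00, 0x01, 0x00, 0x01, 0x00]), Some("x-icon"));
        assert_eq!(get_icon_type(&[0xFF, 0xD8, 0xFF, 0xE0]), Some("jpeg"));
        assert_eq!(get_icon_type(b"GIF89a..."), Some("gif"));
        assert_eq!(get_icon_type(b"BM...."), Some("bmp"));
        assert_eq!(get_icon_type(b"not an image"), None);
    }
}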
@@ -72,7 +72,8 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
|||||||
"Kdf": user.client_kdf_type,
|
"Kdf": user.client_kdf_type,
|
||||||
"KdfIterations": user.client_kdf_iter,
|
"KdfIterations": user.client_kdf_iter,
|
||||||
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
|
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
|
||||||
"scope": "api offline_access"
|
"scope": "api offline_access",
|
||||||
|
"unofficialServer": true,
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,26 +88,28 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
|||||||
let username = data.username.as_ref().unwrap();
|
let username = data.username.as_ref().unwrap();
|
||||||
let user = match User::find_by_mail(username, &conn) {
|
let user = match User::find_by_mail(username, &conn) {
|
||||||
Some(user) => user,
|
Some(user) => user,
|
||||||
None => err!(
|
None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
|
||||||
"Username or password is incorrect. Try again",
|
|
||||||
format!("IP: {}. Username: {}.", ip.ip, username)
|
|
||||||
),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Check password
|
// Check password
|
||||||
let password = data.password.as_ref().unwrap();
|
let password = data.password.as_ref().unwrap();
|
||||||
if !user.check_valid_password(password) {
|
if !user.check_valid_password(password) {
|
||||||
err!(
|
err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||||
"Username or password is incorrect. Try again",
|
}
|
||||||
format!("IP: {}. Username: {}.", ip.ip, username)
|
|
||||||
)
|
// Check if the user is disabled
|
||||||
|
if !user.enabled {
|
||||||
|
err!("This user has been disabled", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||||
}
|
}
|
||||||
|
|
||||||
let now = Local::now();
|
let now = Local::now();
|
||||||
|
|
||||||
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
||||||
let now = now.naive_utc();
|
let now = now.naive_utc();
|
||||||
if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
|
if user.last_verifying_at.is_none()
|
||||||
|
|| now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds()
|
||||||
|
> CONFIG.signups_verify_resend_time() as i64
|
||||||
|
{
|
||||||
let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
|
let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
|
||||||
if resend_limit == 0 || user.login_verify_count < resend_limit {
|
if resend_limit == 0 || user.login_verify_count < resend_limit {
|
||||||
// We want to send another email verification if we require signups to verify
|
// We want to send another email verification if we require signups to verify
|
||||||
@@ -126,15 +129,12 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We still want the login to fail until they actually verified the email address
|
// We still want the login to fail until they actually verified the email address
|
||||||
err!(
|
err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||||
"Please verify your email before trying again.",
|
|
||||||
format!("IP: {}. Username: {}.", ip.ip, username)
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let (mut device, new_device) = get_device(&data, &conn, &user);
|
let (mut device, new_device) = get_device(&data, &conn, &user);
|
||||||
|
|
||||||
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &ip, &conn)?;
|
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn)?;
|
||||||
|
|
||||||
if CONFIG.mail_enabled() && new_device {
|
if CONFIG.mail_enabled() && new_device {
|
||||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
|
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
|
||||||
@@ -160,11 +160,12 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     "Key": user.akey,
     "PrivateKey": user.private_key,
     //"TwoFactorToken": "11122233333444555666777888999"

     "Kdf": user.client_kdf_type,
     "KdfIterations": user.client_kdf_iter,
     "ResetMasterPassword": false,// TODO: Same as above
-    "scope": "api offline_access"
+    "scope": "api offline_access",
+    "unofficialServer": true,
 });

 if let Some(token) = twofactor_token {

@@ -184,7 +185,7 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool)

     let mut new_device = false;
     // Find device or create new
-    let device = match Device::find_by_uuid(&device_id, &conn) {
+    let device = match Device::find_by_uuid(&device_id, conn) {
         Some(device) => {
             // Check if owned device, and recreate if not
             if device.user_uuid != user.uuid {

@@ -226,9 +227,7 @@ fn twofactor_auth(
         None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA token not provided"),
     };

-    let selected_twofactor = twofactors
-        .into_iter()
-        .find(|tf| tf.atype == selected_id && tf.enabled);
+    let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);

     use crate::api::core::two_factor as _tf;
     use crate::crypto::ct_eq;
@@ -237,18 +236,27 @@ fn twofactor_auth(
     let mut remember = data.two_factor_remember.unwrap_or(0);

     match TwoFactorType::from_i32(selected_id) {
-        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?,
+        Some(TwoFactorType::Authenticator) => {
+            _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?
+        }
         Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
+        Some(TwoFactorType::Webauthn) => _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn)?,
         Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
-        Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
-        Some(TwoFactorType::Email) => _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
+        Some(TwoFactorType::Duo) => {
+            _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?
+        }
+        Some(TwoFactorType::Email) => {
+            _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?
+        }

         Some(TwoFactorType::Remember) => {
             match device.twofactor_remember {
                 Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
                     remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
                 }
-                _ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided"),
+                _ => {
+                    err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided")
+                }
             }
         }
         _ => err!("Invalid two factor provider"),
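One detail worth noting in the `Remember` arm above: the stored remember token is compared with `crate::crypto::ct_eq`, i.e. in constant time, so response timing does not reveal how many leading bytes of a guessed token were correct. A rough illustration of the idea follows; this is a didactic sketch, not the crate's actual implementation.

```rust
/// Constant-time byte comparison: inspect every byte and accumulate
/// differences with XOR/OR instead of returning at the first mismatch.
/// (Simplified: the early return on length mismatch still leaks length.)
fn ct_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    let mut diff: u8 = 0;
    for (x, y) in a.iter().zip(b.iter()) {
        diff |= x ^ y;
    }
    diff == 0
}

fn main() {
    let stored = b"remember-token-123";
    assert!(ct_eq(stored, b"remember-token-123"));
    assert!(!ct_eq(stored, b"remember-token-124"));
    println!("constant-time comparison ok");
}
```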
@@ -302,8 +310,13 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
             });
         }

+        Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
+            let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn)?;
+            result["TwoFactorProviders2"][provider.to_string()] = request.0;
+        }
+
         Some(TwoFactorType::Duo) => {
-            let email = match User::find_by_uuid(user_uuid, &conn) {
+            let email = match User::find_by_uuid(user_uuid, conn) {
                 Some(u) => u.email,
                 None => err!("User does not exist"),
             };

@@ -317,7 +330,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         }

         Some(tf_type @ TwoFactorType::YubiKey) => {
-            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
+            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
                 Some(tf) => tf,
                 None => err!("No YubiKey devices registered"),
             };

@@ -332,14 +345,14 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         Some(tf_type @ TwoFactorType::Email) => {
             use crate::api::core::two_factor as _tf;

-            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
+            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
                 Some(tf) => tf,
                 None => err!("No twofactor email registered"),
             };

             // Send email immediately if email is the only 2FA option
             if providers.len() == 1 {
-                _tf::email::send_token(&user_uuid, &conn)?
+                _tf::email::send_token(user_uuid, conn)?
             }

             let email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -10,6 +10,8 @@ use serde_json::Value;

 pub use crate::api::{
     admin::routes as admin_routes,
+    core::purge_sends,
+    core::purge_trashed_ciphers,
     core::routes as core_routes,
     icons::routes as icons_routes,
     identity::routes as identity_routes,
@@ -49,13 +51,14 @@ impl NumberOrString {
         }
     }

-    fn into_i32(self) -> ApiResult<i32> {
+    #[allow(clippy::wrong_self_convention)]
+    fn into_i32(&self) -> ApiResult<i32> {
         use std::num::ParseIntError as PIE;
         match self {
-            NumberOrString::Number(n) => Ok(n),
-            NumberOrString::String(s) => s
-                .parse()
-                .map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
+            NumberOrString::Number(n) => Ok(*n),
+            NumberOrString::String(s) => {
+                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
+            }
         }
     }
 }
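`NumberOrString` exists because Bitwarden clients send some numeric fields either as JSON numbers or as strings; the hunk above switches `into_i32` to borrow `&self` (silencing clippy's `wrong_self_convention` lint for an `into_*` name) and collapses the parse-and-map-err chain onto one line. Here is a stand-alone sketch of the same pattern, with the error type simplified to `String` instead of the crate's `ApiResult`.

```rust
use serde::Deserialize;

// Minimal stand-alone version of the NumberOrString pattern changed above.
#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

impl NumberOrString {
    // Borrowing `&self` (as in the diff) lets callers convert without
    // consuming the value; clippy normally expects `into_*` methods to take
    // `self`, which is what the `#[allow]` in the hunk silences.
    fn into_i32(&self) -> Result<i32, String> {
        match self {
            NumberOrString::Number(n) => Ok(*n),
            NumberOrString::String(s) => {
                s.parse().map_err(|e: std::num::ParseIntError| e.to_string())
            }
        }
    }
}

fn main() {
    let a: NumberOrString = serde_json::from_str("5000").unwrap();
    let b: NumberOrString = serde_json::from_str("\"5000\"").unwrap();
    assert_eq!(a.into_i32().unwrap(), 5000);
    assert_eq!(b.into_i32().unwrap(), 5000);
    println!("both forms parse to 5000");
}
```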
@@ -4,12 +4,7 @@ use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value as JsonValue;

-use crate::{
-    api::{EmptyResult, JsonResult},
-    auth::Headers,
-    db::DbConn,
-    Error, CONFIG,
-};
+use crate::{api::EmptyResult, auth::Headers, db::DbConn, Error, CONFIG};

 pub fn routes() -> Vec<Route> {
     routes![negotiate, websockets_err]

@@ -19,12 +14,15 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

 #[get("/hub")]
 fn websockets_err() -> EmptyResult {
-    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_and_swap(true, false, Ordering::Relaxed) {
+    if CONFIG.websocket_enabled()
+        && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
+    {
         err!(
-            "###########################################################
+            "
+###########################################################
     '/notifications/hub' should be proxied to the websocket server or notifications won't work.
     Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
-    ###########################################################################################"
+    ###########################################################################################\n"
         )
     } else {
         Err(Error::empty())
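`AtomicBool::compare_and_swap` was deprecated in Rust 1.50, which is why the hunk above moves to `compare_exchange`: it takes separate success/failure orderings and returns a `Result`, so `.is_ok()` reproduces the old "did the swap actually happen" boolean. A small print-once sketch of the same pattern:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Print-once guard in the style of SHOW_WEBSOCKETS_MSG above.
static SHOW_MSG: AtomicBool = AtomicBool::new(true);

fn warn_once() {
    // compare_exchange(current, new, success_order, failure_order) returns
    // Ok(previous) only when the value was actually swapped, so the warning
    // is emitted exactly once per process.
    if SHOW_MSG
        .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
        .is_ok()
    {
        eprintln!("'/notifications/hub' should be proxied to the websocket server");
    }
}

fn main() {
    warn_once(); // prints the warning
    warn_once(); // silent
}
```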
@@ -32,7 +30,7 @@ fn websockets_err() -> EmptyResult {
 }

 #[post("/hub/negotiate")]
-fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
+fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
     use crate::crypto;
     use data_encoding::BASE64URL;

@@ -48,10 +46,10 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
     // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
     // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
     // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
-    Ok(Json(json!({
+    Json(json!({
         "connectionId": conn_id,
         "availableTransports": available_transports
-    })))
+    }))
 }

 //
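Changing `negotiate` to return `Json<JsonValue>` instead of the fallible `JsonResult` drops the now-pointless `Ok(...)` wrapper, since the handler only builds a JSON body and cannot fail. Below is a stripped-down sketch of a handler with that shape, assuming Rocket 0.4 with `rocket_contrib`'s json feature (the imports the diff itself uses); the route body here is illustrative and not the real negotiate payload.

```rust
// Rocket 0.4 requires a nightly toolchain and these crate-root features.
#![feature(proc_macro_hygiene, decl_macro)]

#[macro_use]
extern crate rocket;

use rocket_contrib::json::Json;
use serde_json::{json, Value};

#[post("/hub/negotiate")]
fn negotiate() -> Json<Value> {
    // Nothing fallible happens here, so there is no Err case to return.
    Json(json!({
        "connectionId": "demo-connection-id",
        "availableTransports": [
            { "transport": "WebSockets", "transferFormats": ["Text", "Binary"] }
        ]
    }))
}

fn main() {
    rocket::ignite().mount("/notifications", routes![negotiate]).launch();
}
```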
@@ -121,7 +119,7 @@ fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
 }

 // Server WebSocket handler
-pub struct WSHandler {
+pub struct WsHandler {
     out: Sender,
     user_uuid: Option<String>,
     users: WebSocketUsers,

@@ -141,7 +139,7 @@ const PING: Token = Token(1);

 const ACCESS_TOKEN_KEY: &str = "access_token=";

-impl WSHandler {
+impl WsHandler {
     fn err(&self, msg: &'static str) -> ws::Result<()> {
         self.out.close(ws::CloseCode::Invalid)?;

@@ -161,14 +159,14 @@ impl WSHandler {
             }
         }
     };

     // Otherwise verify the query parameter value
     let path = hs.request.resource();
     if let Some(params) = path.split('?').nth(1) {
         let params_iter = params.split('&').take(1);
         for val in params_iter {
-            if val.starts_with(ACCESS_TOKEN_KEY) {
-                return Some(val[ACCESS_TOKEN_KEY.len()..].into());
+            if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
+                return Some(stripped.into());
             }
         }
     };
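The `starts_with` plus manual-slice pair above is exactly what clippy's `manual_strip` lint flags; `str::strip_prefix` checks the prefix and hands back the remainder in one step. A small sketch of extracting the access token that way (the query scanning is simplified here; the real handler only inspects the first parameter):

```rust
// strip_prefix replaces starts_with + val[KEY.len()..] with a single call
// that returns Some(remainder) only when the prefix matches.
const ACCESS_TOKEN_KEY: &str = "access_token=";

fn access_token(query: &str) -> Option<String> {
    for val in query.split('&') {
        if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
            return Some(stripped.to_string());
        }
    }
    None
}

fn main() {
    let q = "id=abc==&access_token=eyJhbGciOi...";
    assert_eq!(access_token(q), Some("eyJhbGciOi...".to_string()));
    assert_eq!(access_token("id=abc"), None);
    println!("token extracted");
}
```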
@@ -177,7 +175,7 @@ impl WSHandler {
     }
 }

-impl Handler for WSHandler {
+impl Handler for WsHandler {
     fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
         // Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
         //

@@ -205,9 +203,7 @@ impl Handler for WSHandler {
         let handler_insert = self.out.clone();
         let handler_update = self.out.clone();

-        self.users
-            .map
-            .upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));
+        self.users.map.upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));

         // Schedule a ping to keep the connection alive
         self.out.timeout(PING_MS, PING)

@@ -217,7 +213,11 @@ impl Handler for WSHandler {
         if let Message::Text(text) = msg.clone() {
             let json = &text[..text.len() - 1]; // Remove last char

-            if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
+            if let Ok(InitialMessage {
+                protocol,
+                version,
+            }) = from_str::<InitialMessage>(json)
+            {
                 if &protocol == "messagepack" && version == 1 {
                     return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                 }
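The reflowed `if let` above destructures the client's initial SignalR-style handshake message. Here is a stand-alone sketch of that parse: the struct mirrors the two fields the handler reads, and the final character stripped before parsing ("Remove last char") is the 0x1e record separator that the handshake frame ends with.

```rust
use serde::Deserialize;
use serde_json::from_str;

// Stand-alone version of the handshake parsing shown above; the struct
// mirrors the InitialMessage fields the handler destructures.
#[derive(Deserialize, Debug)]
struct InitialMessage {
    protocol: String,
    version: i32,
}

fn main() {
    // The handshake frame is JSON followed by a single 0x1e record separator,
    // which is removed before deserializing.
    let frame = "{\"protocol\":\"messagepack\",\"version\":1}\u{1e}";
    let json = &frame[..frame.len() - 1];

    if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
        if protocol == "messagepack" && version == 1 {
            println!("handshake accepted, switching to MessagePack");
        }
    }
}
```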
@@ -241,13 +241,13 @@ impl Handler for WSHandler {
     }
 }

-struct WSFactory {
+struct WsFactory {
     pub users: WebSocketUsers,
 }

-impl WSFactory {
+impl WsFactory {
     pub fn init() -> Self {
-        WSFactory {
+        WsFactory {
             users: WebSocketUsers {
                 map: Arc::new(CHashMap::new()),
             },

@@ -255,11 +255,11 @@ impl WSFactory {
     }
 }

-impl Factory for WSFactory {
-    type Handler = WSHandler;
+impl Factory for WsFactory {
+    type Handler = WsHandler;

     fn connection_made(&mut self, out: Sender) -> Self::Handler {
-        WSHandler {
+        WsHandler {
             out,
             user_uuid: None,
             users: self.users.clone(),

@@ -296,10 +296,7 @@ impl WebSocketUsers {
     // NOTE: The last modified date needs to be updated before calling these methods
     pub fn send_user_update(&self, ut: UpdateType, user: &User) {
         let data = create_update(
-            vec![
-                ("UserId".into(), user.uuid.clone().into()),
-                ("Date".into(), serialize_date(user.updated_at)),
-            ],
+            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             ut,
         );

@@ -335,7 +332,7 @@ impl WebSocketUsers {
         );

         for uuid in user_uuids {
-            self.send_update(&uuid, &data).ok();
+            self.send_update(uuid, &data).ok();
         }
     }
 }

@@ -395,6 +392,10 @@ pub enum UpdateType {

     LogOut = 11,

+    SyncSendCreate = 12,
+    SyncSendUpdate = 13,
+    SyncSendDelete = 14,
+
     None = 100,
 }

@@ -402,7 +403,7 @@ use rocket::State;
 pub type Notify<'a> = State<'a, WebSocketUsers>;

 pub fn start_notification_server() -> WebSocketUsers {
-    let factory = WSFactory::init();
+    let factory = WsFactory::init();
     let users = factory.users.clone();

     if CONFIG.websocket_enabled() {
Some files were not shown because too many files have changed in this diff.