Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 18:55:57 +03:00)

Compare commits (105 commits)
Commits (SHA1):
ad1d65bdf8
3b283c289e
4b9384cb2b
0f39d96518
edf7484a70
8b66e34415
1d00e34bbb
1b801406d6
5e46a43306
5c77431c2d
2775c6ce8a
890e668071
596c167312
ae3a153bdb
2c36993792
d672ad3f76
a641b48884
98b2178c7d
76a3f0f531
c5665e7b77
cbdcf8ef9f
3337594d60
2daa8be1f1
eccb3ab947
3246251f29
8ab200224e
34e00e1478
0fdda3bc2f
48836501bf
f863ffb89a
03c6ed2e07
efc6eb0073
cec1e87679
512b3b9b7c
93da5091e6
915496c103
ecb31c85d6
d722328f05
cb4b683dcd
6eaf131922
8933ac2ee7
6822e445bb
18fbc1ccf6
4861f6decc
b435ee49ad
193f86e43e
66a7baa67c
18d66474e0
ff8db4fd78
b2f9af718e
198fd2fc1d
ec8a9c82df
ef5e0bd4e5
30b408eaa9
e205e3b7db
ca1a9e26d8
f3a1385aee
008a2cf298
f0c9a7fbc3
9162b13123
480bf9b0c1
f96c5e8a1e
3d4be24902
bf41d74501
01e33a4919
bc26bfa589
ccc51e7580
99a59bc4f3
a77482575a
bbd630f1ee
d18b793c71
d3a1d875d5
d6e0ace192
60cbfa59bf
5ab7010c37
ad2cfd8b97
32543c46da
66bff73ebf
83d5432cbf
f579a4154c
f5a19c5f8b
aa9bc1f785
f162e85e44
33ef70c192
3d2df6ce11
6cdcb3b297
d1af468700
ae1c53f4e5
bc57c4b193
61ae4c9cf5
8d7b3db33d
e9ec3741ae
dacd50f3f1
9412112639
aaeae16983
d892880dd2
4395e8e888
3dbfc484a5
4ec2507073
ab65d7989b
8707728cdb
631d022e17
211f4492fa
61f9081827
a8e5384c4a
.env.template (503 changed lines)

@@ -10,22 +10,63 @@
 ## variable ENV_FILE can be set to the location of this file prior to starting
 ## Vaultwarden.

+####################
+### Data folders ###
+####################
+
 ## Main data folder
 # DATA_FOLDER=data

+## Individual folders, these override %DATA_FOLDER%
+# RSA_KEY_FILENAME=data/rsa_key
+# ICON_CACHE_FOLDER=data/icon_cache
+# ATTACHMENTS_FOLDER=data/attachments
+# SENDS_FOLDER=data/sends
+# TMP_FOLDER=data/tmp
+
+## Templates data folder, by default uses embedded templates
+## Check source code to see the format
+# TEMPLATES_FOLDER=data/templates
+## Automatically reload the templates for every request, slow, use only for development
+# RELOAD_TEMPLATES=false
+
+## Web vault settings
+# WEB_VAULT_FOLDER=web-vault/
+# WEB_VAULT_ENABLED=true
+
+#########################
+### Database settings ###
+#########################
+
 ## Database URL
 ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
 # DATABASE_URL=data/db.sqlite3
 ## When using MySQL, specify an appropriate connection URI.
-## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
+## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
 # DATABASE_URL=mysql://user:password@host[:port]/database_name
 ## When using PostgreSQL, specify an appropriate connection URI (recommended)
 ## or keyword/value connection string.
 ## Details:
-## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
+## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
 ## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
 # DATABASE_URL=postgresql://user:password@host[:port]/database_name

+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents Vaultwarden from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
+
+## Database timeout
+## Timeout when acquiring database connection
+# DATABASE_TIMEOUT=30
+
 ## Database max connections
 ## Define the size of the connection pool used for connecting to the database.
 # DATABASE_MAX_CONNS=10
@@ -39,31 +80,9 @@
 ## - PostgreSQL: ""
 # DATABASE_CONN_INIT=""

-## Individual folders, these override %DATA_FOLDER%
-# RSA_KEY_FILENAME=data/rsa_key
-# ICON_CACHE_FOLDER=data/icon_cache
-# ATTACHMENTS_FOLDER=data/attachments
-# SENDS_FOLDER=data/sends
-# TMP_FOLDER=data/tmp
-
-## Templates data folder, by default uses embedded templates
-## Check source code to see the format
-# TEMPLATES_FOLDER=/path/to/templates
-## Automatically reload the templates for every request, slow, use only for development
-# RELOAD_TEMPLATES=false
-
-## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
-## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Real-IP
-
-## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
-# ICON_CACHE_TTL=2592000
-## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
-# ICON_CACHE_NEGTTL=259200
-
-## Web vault settings
-# WEB_VAULT_FOLDER=web-vault/
-# WEB_VAULT_ENABLED=true
-
+#################
+### WebSocket ###
+#################
+
 ## Enables websocket notifications
 # WEBSOCKET_ENABLED=false
@@ -72,37 +91,24 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012

+##########################
+### Push notifications ###
+##########################
+
 ## Enables push notifications (requires key and id from https://bitwarden.com/host)
-# PUSH_ENABLED=true
+## If you choose "European Union" Data Region, uncomment PUSH_RELAY_URI and PUSH_IDENTITY_URI then replace .com by .eu
+## Details about mobile client push notification:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
+# PUSH_ENABLED=false
 # PUSH_INSTALLATION_ID=CHANGEME
 # PUSH_INSTALLATION_KEY=CHANGEME
 ## Don't change this unless you know what you're doing.
-# PUSH_RELAY_BASE_URI=https://push.bitwarden.com
+# PUSH_RELAY_URI=https://push.bitwarden.com
+# PUSH_IDENTITY_URI=https://identity.bitwarden.com

-## Controls whether users are allowed to create Bitwarden Sends.
-## This setting applies globally to all users.
-## To control this on a per-org basis instead, use the "Disable Send" org policy.
-# SENDS_ALLOWED=true
-
-## Controls whether users can enable emergency access to their accounts.
-## This setting applies globally to all users.
-# EMERGENCY_ACCESS_ALLOWED=true
-
-## Controls whether event logging is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
-# ORG_EVENTS_ENABLED=false
-
-## Number of days to retain events stored in the database.
-## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
-# EVENTS_DAYS_RETAIN=
-
-## BETA FEATURE: Groups
-## Controls whether group support is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default because this is a beta feature, it contains known issues!
-## KNOW WHAT YOU ARE DOING!
-# ORG_GROUPS_ENABLED=false
-
+#####################
+### Schedule jobs ###
+#####################
+
 ## Job scheduler settings
 ##
@@ -143,60 +149,69 @@
 ## Cron schedule of the job that cleans old events from the event table.
 ## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
 # EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
-## Enable extended logging, which shows timestamps and targets in the logs
-# EXTENDED_LOGGING=true
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=

-## Timestamp format used in extended logging.
-## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
-# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
-
-## Logging to file
-# LOG_FILE=/path/to/log
-
-## Logging to Syslog
-## This requires extended logging
-# USE_SYSLOG=false
-
-## Log level
-## Change the verbosity of the log output
-## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
-# LOG_LEVEL=Info
-
-## Enable WAL for the DB
-## Set to false to avoid enabling WAL during startup.
-## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents Vaultwarden from automatically enabling it on start.
-## Please read project wiki page about this setting first before changing the value as it can
-## cause performance degradation or might render the service unable to start.
-# ENABLE_DB_WAL=true
-
-## Database connection retries
-## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
-# DB_CONNECTION_RETRIES=15
-
-## Icon service
-## The predefined icon services are: internal, bitwarden, duckduckgo, google.
-## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
-## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
 ##
-## `internal` refers to Vaultwarden's built-in icon fetching implementation.
-## If an external service is set, an icon request to Vaultwarden will return an HTTP
-## redirect to the corresponding icon at the external service. An external service may
-## be useful if your Vaultwarden instance has no external network connectivity, or if
-## you are concerned that someone may probe your instance to try to detect whether icons
-## for certain sites have been cached.
-# ICON_SERVICE=internal
-
-## Icon redirect code
-## The HTTP status code to use for redirects to an external icon service.
-## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
-## Temporary redirects are useful while testing different icon services, but once a service
-## has been decided on, consider using permanent redirects for cacheability. The legacy codes
-## are currently better supported by the Bitwarden clients.
-# ICON_REDIRECT_CODE=302
+## Cron schedule of the job that cleans old auth requests from the auth request.
+## Defaults to every minute. Set blank to disable this job.
+# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
+
+########################
+### General settings ###
+########################
+
+## Domain settings
+## The domain must match the address from where you access the server
+## It's recommended to configure this value, otherwise certain functionality might not work,
+## like attachment downloads, email links and U2F.
+## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
+## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
+## Details:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
+## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
+## For development
+# DOMAIN=http://localhost
+## For public server
+# DOMAIN=https://vw.domain.tld
+## For public server (URL with port number)
+# DOMAIN=https://vw.domain.tld:8443
+## For public server (URL with path)
+# DOMAIN=https://domain.tld/vw
+
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
+
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
+# USER_ATTACHMENT_LIMIT=
+## Per-user send storage limit (KB)
+## Max kilobytes of send storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further sends.
+# USER_SEND_LIMIT=
+
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3

 ## Disable icon downloading
 ## Set to true to disable icon downloading in the internal icon service.
@@ -205,38 +220,6 @@
 ## will be deleted eventually, but won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false

-## Icon download timeout
-## Configure the timeout value when downloading the favicons.
-## The default is 10 seconds, but this could be to low on slower network connections
-# ICON_DOWNLOAD_TIMEOUT=10
-
-## Icon blacklist Regex
-## Any domains or IPs that match this regex won't be fetched by the icon service.
-## Useful to hide other servers in the local network. Check the WIKI for more details
-## NOTE: Always enclose this regex withing single quotes!
-# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
-
-## Any IP which is not defined as a global IP will be blacklisted.
-## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-# ICON_BLACKLIST_NON_GLOBAL_IPS=true
-
-## Disable 2FA remember
-## Enabling this would force the users to use a second factor to login every time.
-## Note that the checkbox would still be present, but ignored.
-# DISABLE_2FA_REMEMBER=false
-
-## Maximum attempts before an email token is reset and a new email will need to be sent.
-# EMAIL_ATTEMPTS_LIMIT=3
-
-## Token expiration time
-## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
-# EMAIL_EXPIRATION_TIME=600
-
-## Email token size
-## Number of digits in an email 2FA token (min: 6, max: 255).
-## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
-# EMAIL_TOKEN_SIZE=6
-
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true

@@ -258,6 +241,11 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org

+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
 ## Controls which users can create new orgs.
 ## Blank or 'all' means all users can create orgs (this is the default):
 # ORG_CREATION_USERS=
@@ -266,6 +254,122 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com

+## Invitations org admins to invite users, even when signups are disabled
+# INVITATIONS_ALLOWED=true
+## Name shown in the invitation emails that don't come from a specific organization
+# INVITATION_ORG_NAME=Vaultwarden
+
+## The number of hours after which an organization invite token, emergency access invite token,
+## email verification token and deletion request token will expire (must be at least 1)
+# INVITATION_EXPIRATION_HOURS=120
+
+## Controls whether users can enable emergency access to their accounts.
+## This setting applies globally to all users.
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
+
+## Number of server-side passwords hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=600000
+
+## Controls whether users can set password hints. This setting applies globally to all users.
+# PASSWORD_HINTS_ALLOWED=true
+
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false
+
+#########################
+### Advanced settings ###
+#########################
+
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
+## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
+# IP_HEADER=X-Real-IP
+
+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
+## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
+## Default: 2592000 (30 days)
+# ICON_CACHE_TTL=2592000
+## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
+## Default: 2592000 (3 days)
+# ICON_CACHE_NEGTTL=259200
+
+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be to low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Icon blacklist Regex
+## Any domains or IPs that match this regex won't be fetched by the icon service.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+## NOTE: Always enclose this regex withing single quotes!
+# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
+
+## Any IP which is not defined as a global IP will be blacklisted.
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+# ICON_BLACKLIST_NON_GLOBAL_IPS=true
+
+## Client Settings
+## Enable experimental feature flags for clients.
+## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
+##
+## The following flags are available:
+## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
+## - "autofill-v2": Use the new autofill implementation.
+## - "browser-fileless-import": Directly import credentials from other providers without a file.
+## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## Enable extended logging, which shows timestamps and targets in the logs
+# EXTENDED_LOGGING=true
+
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
+
+## Logging to Syslog
+## This requires extended logging
+# USE_SYSLOG=false
+
+## Logging to file
+# LOG_FILE=/path/to/log
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## Setting it to "trace" or "debug" would also show logs for mounted
+## routes and static file, websocket and alive requests
+# LOG_LEVEL=info
+
 ## Token for the admin interface, preferably an Argon2 PCH string
 ## Vaultwarden has a built-in generator by calling `vaultwarden hash`
 ## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
@@ -281,54 +385,13 @@
 ## meant to be used with the use of a separate auth layer in front
 # DISABLE_ADMIN_TOKEN=false

-## Invitations org admins to invite users, even when signups are disabled
-# INVITATIONS_ALLOWED=true
-## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Vaultwarden
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3

-## The number of hours after which an organization invite token, emergency access invite token,
-## email verification token and deletion request token will expire (must be at least 1)
-# INVITATION_EXPIRATION_HOURS=120
-
-## Per-organization attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per organization.
-## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
-# ORG_ATTACHMENT_LIMIT=
-## Per-user attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per user.
-## When this limit is reached, the user will not be allowed to upload further attachments.
-# USER_ATTACHMENT_LIMIT=
-
-## Number of days to wait before auto-deleting a trashed item.
-## If unset (the default), trashed items are not auto-deleted.
-## This setting applies globally, so make sure to inform all users of any changes to this setting.
-# TRASH_AUTO_DELETE_DAYS=
-
-## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
-## resulting in an email notification. An incomplete 2FA login is one where the correct
-## master password was provided but the required 2FA step was not completed, which
-## potentially indicates a master password compromise. Set to 0 to disable this check.
-## This setting applies globally to all users.
-# INCOMPLETE_2FA_TIME_LIMIT=3
-
-## Number of server-side passwords hashing iterations for the password hash.
-## The default for new users. If changed, it will be updated during login for existing users.
-# PASSWORD_ITERATIONS=350000
-
-## Controls whether users can set password hints. This setting applies globally to all users.
-# PASSWORD_HINTS_ALLOWED=true
-
-## Controls whether a password hint should be shown directly in the web page if
-## SMTP service is not configured. Not recommended for publicly-accessible instances
-## as this provides unauthenticated access to potentially sensitive data.
-# SHOW_PASSWORD_HINT=false
-
-## Domain settings
-## The domain must match the address from where you access the server
-## It's recommended to configure this value, otherwise certain functionality might not work,
-## like attachment downloads, email links and U2F.
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://vw.domain.tld:8443
+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20

 ## Allowed iframe ancestors (Know the risks!)
 ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
@@ -343,13 +406,16 @@
 ## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
 # LOGIN_RATELIMIT_MAX_BURST=10

-## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
-# ADMIN_RATELIMIT_SECONDS=300
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
-# ADMIN_RATELIMIT_MAX_BURST=3
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false

-## Set the lifetime of admin sessions to this value (in minutes).
-# ADMIN_SESSION_LIFETIME=20
+########################
+### MFA/2FA settings ###
+########################

 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
@@ -370,6 +436,25 @@
 ## After that, you should be able to follow the rest of the guide linked above,
 ## ignoring the fields that ask for the values that you already configured beforehand.

+## Email 2FA settings
+## Email token size
+## Number of digits in an email 2FA token (min: 6, max: 255).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+##
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+##
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+
+## Other MFA/2FA settings
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+##
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
@@ -382,12 +467,9 @@
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
 # AUTHENTICATOR_DISABLE_TIME_DRIFT=false

-## Rocket specific settings
-## See https://rocket.rs/v0.4/guide/configuration/ for more details.
-# ROCKET_ADDRESS=0.0.0.0
-# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
-# ROCKET_WORKERS=10
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
-
+###########################
+### SMTP Email settings ###
+###########################
+
 ## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
@@ -409,7 +491,7 @@
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
-# SMTP_AUTH_MECHANISM="Plain"
+# SMTP_AUTH_MECHANISM=

 ## Server name sent during the SMTP HELO
 ## By default this value should be is on the machine's hostname,
@@ -417,30 +499,33 @@
 # HELO_NAME=

 ## Embed images as email attachments
-# SMTP_EMBED_IMAGES=false
+# SMTP_EMBED_IMAGES=true

 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
 # SMTP_DEBUG=false

-## Accept Invalid Hostnames
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-# SMTP_ACCEPT_INVALID_HOSTNAMES=false
-
 ## Accept Invalid Certificates
 ## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
 ## Only use this as a last resort if you are not able to use a valid certificate.
 ## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
 # SMTP_ACCEPT_INVALID_CERTS=false

-## Require new device emails. When a user logs in an email is required to be sent.
-## If sending the email fails the login attempt will fail!!
-# REQUIRE_DEVICE_EMAIL=false
-
-## HIBP Api Key
-## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
-# HIBP_API_KEY=
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+##########################
+### Rocket settings ###
+##########################
+
+## Rocket specific settings
+## See https://rocket.rs/v0.5/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
+# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}

 # vim: syntax=ini
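Note on the admin-token lines in the template above: it points to `vaultwarden hash` as the built-in generator for an Argon2 PCH string. A minimal sketch of that flow, assuming a locally available `vaultwarden` binary; the value in the comment below is a placeholder, not a real hash:

# Generate an Argon2 PCH string; the subcommand prompts for the admin password
# and prints the string to use as ADMIN_TOKEN (see the wiki page linked above).
vaultwarden hash
# In the env file, single-quote the value so the `$` separators of the PCH
# string are not expanded by the shell or compose tooling:
# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$<salt>$<hash>'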
.github/CODEOWNERS (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
+/.github @dani-garcia @BlackDex
+/.github/CODEOWNERS @dani-garcia @BlackDex
+/.github/workflows/** @dani-garcia @BlackDex
.github/workflows/build.yml (vendored, 75 changed lines)

@@ -8,9 +8,11 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -18,19 +20,20 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"

 jobs:
   build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
-      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
     strategy:
       fail-fast: false
       matrix:
@@ -43,13 +46,13 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
       # End Checkout the repo


       # Install dependencies
       - name: "Install dependencies Ubuntu"
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies


@@ -59,7 +62,7 @@ jobs:
         shell: bash
         run: |
           if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
-            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+            RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
           elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
             RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
           else
@@ -71,7 +74,7 @@ jobs:

       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -81,17 +84,19 @@ jobs:

       # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used

-      # Enable Rust Caching
-      - uses: Swatinem/rust-cache@2656b87321093db1cb55fbd73183d195214fdfd1 # v2.5.0
-      # End Enable Rust Caching
+      # Set the current matrix toolchain version as default
+      - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+        run: |
+          # Remove the rust-toolchain.toml
+          rm rust-toolchain.toml
+          # Set the default
+          rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}

       # Show environment
       - name: "Show environment"
@@ -100,47 +105,55 @@ jobs:
           cargo -vV
       # End Show environment

+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+        with:
+          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
+          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
+          # Only update when really needed! Use a <year>.<month>[.<inc>] format.
+          prefix-key: "v2023.07-rust"
+      # End Enable Rust Caching
+
-      # Run cargo tests (In release mode to speed up future builds)
+      # Run cargo tests
       # First test all features together, afterwards test them separately.
       - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
         id: test_sqlite_mysql_postgresql_mimalloc
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
+          cargo test --features sqlite,mysql,postgresql,enable_mimalloc

       - name: "test features: sqlite,mysql,postgresql"
         id: test_sqlite_mysql_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql
+          cargo test --features sqlite,mysql,postgresql

       - name: "test features: sqlite"
         id: test_sqlite
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite
+          cargo test --features sqlite

       - name: "test features: mysql"
         id: test_mysql
         if: $${{ always() }}
         run: |
-          cargo test --release --features mysql
+          cargo test --features mysql

       - name: "test features: postgresql"
         id: test_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features postgresql
+          cargo test --features postgresql
       # End Run cargo tests


-      # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+      # Run cargo clippy, and fail on warnings
       - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
         id: clippy
         if: ${{ always() && matrix.channel == 'rust-toolchain' }}
         run: |
-          cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+          cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
       # End Run cargo clippy
@@ -182,21 +195,3 @@ jobs:
         run: |
           echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
-
-
-      # Build the binary to upload to the artifacts
-      - name: "build features: sqlite,mysql,postgresql"
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        run: |
-          cargo build --release --features sqlite,mysql,postgresql
-      # End Build the binary
-
-
-      # Upload artifact to Github Actions
-      - name: "Upload artifact"
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          name: vaultwarden
-          path: target/release/vaultwarden
-      # End Upload artifact to Github Actions
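The toolchain-detection change above swaps `cat rust-toolchain` for a `grep -oP` over rust-toolchain.toml. A small local sketch of that extraction; the rust-toolchain.toml contents below are an assumed example, not taken from the repository:

# Assumed example file; the real repo pins its own channel value.
cat > rust-toolchain.toml <<'EOF'
[toolchain]
channel = "1.74.0"
components = [ "rustfmt", "clippy" ]
EOF

# Same extraction as the workflow step: print the value between the quotes
# on the `channel = "..."` line.
RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
echo "${RUST_TOOLCHAIN}"   # prints: 1.74.0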
.github/workflows/hadolint.yml (vendored, 7 changed lines)

@@ -8,15 +8,14 @@ on: [
 jobs:
   hadolint:
     name: Validate Dockerfile syntax
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 30
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       # End Checkout the repo

-
       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
@@ -30,5 +29,5 @@ jobs:
       # Test Dockerfiles
       - name: Run hadolint
         shell: bash
-        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
+        run: hadolint docker/Dockerfile.{debian,alpine}
       # End Test Dockerfiles
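The new hadolint invocation above relies on bash brace expansion (the step runs with `shell: bash`), so one argument covers both Dockerfiles. A quick sketch of what the shell expands it to, with `echo` standing in for hadolint:

echo docker/Dockerfile.{debian,alpine}
# prints: docker/Dockerfile.debian docker/Dockerfile.alpine
# so the step effectively runs: hadolint docker/Dockerfile.debian docker/Dockerfile.alpine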
.github/workflows/release.yml (vendored, 277 changed lines)

@@ -6,12 +6,11 @@ on:
       - ".github/workflows/release.yml"
       - "src/**"
       - "migrations/**"
-      - "hooks/**"
       - "docker/**"
       - "Cargo.*"
       - "build.rs"
       - "diesel.toml"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"

     branches: # Only on paths above
       - main
@@ -24,34 +23,31 @@ jobs:
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
       - name: Skip Duplicates Actions
         id: skip_check
-        uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
+        uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
         with:
           cancel_others: 'true'
         # Only run this when not creating a tag
-        if: ${{ startsWith(github.ref, 'refs/heads/') }}
+        if: ${{ github.ref_type == 'branch' }}

   docker-build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     needs: skip_check
-    # Start a local docker registry to be used to generate multi-arch images.
+    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
+    # Start a local docker registry to extract the final Alpine static build binaries
     services:
       registry:
         image: registry:2
         ports:
           - 5000:5000
     env:
-      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
-      # build performance and the ability to copy extended file attributes
-      # (e.g., for executable capabilities) across build phases.
-      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
       # The *_REPO variables need to be configured as repository variables
@@ -65,7 +61,6 @@ jobs:
       # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
       # Check for Quay.io credentials in secrets
       HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
-    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
         base_image: ["debian","alpine"]
@@ -73,163 +68,201 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
         with:
           fetch-depth: 0

-      # Determine Docker Tag
-      - name: Init Variables
-        id: vars
+      - name: Initialize QEMU binfmt support
+        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
+        with:
+          platforms: "arm64,arm"
+
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+        with:
+          config-inline: |
+            [worker.oci]
+              max-parallelism = 2
+          driver-opts: |
+            network=host
+
+      # Determine Base Tags and Source Version
+      - name: Determine Base Tags and Source Version
         shell: bash
         run: |
-          # Check which main tag we are going to build determined by github.ref
-          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
-            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
-          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
-            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
           fi
-          # End Determine Docker Tag
+          # Get the Source Version for this release
+          GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
+          if [[ -n "${GIT_EXACT_TAG}" ]]; then
+            echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}"
+          else
+            GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+            echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}"
+          fi
+          # End Determine Base Tags

       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}

+      - name: Add registry for DockerHub
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}

+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+
+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to Quay.io
|
# Login to Quay.io
|
||||||
- name: Login to Quay.io
|
- name: Login to Quay.io
|
||||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
|
||||||
with:
|
with:
|
||||||
registry: quay.io
|
registry: quay.io
|
||||||
username: ${{ secrets.QUAY_USERNAME }}
|
username: ${{ secrets.QUAY_USERNAME }}
|
||||||
password: ${{ secrets.QUAY_TOKEN }}
|
password: ${{ secrets.QUAY_TOKEN }}
|
||||||
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
|
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
|
||||||
|
|
||||||
# Debian
|
- name: Add registry for Quay.io
|
||||||
|
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
|
||||||
# Docker Hub
|
|
||||||
- name: Build Debian based images (docker.io)
|
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/build
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Debian based images (docker.io)
|
- name: Configure build cache from/to
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/push
|
#
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
# Check if there is a GitHub Container Registry Login and use it for caching
|
||||||
|
if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
|
||||||
|
echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},mode=max" | tee -a "${GITHUB_ENV}"
|
||||||
|
else
|
||||||
|
echo "BAKE_CACHE_FROM="
|
||||||
|
echo "BAKE_CACHE_TO="
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
|
||||||
# GitHub Container Registry
|
- name: Add localhost registry
|
||||||
- name: Build Debian based images (ghcr.io)
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/build
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Debian based images (ghcr.io)
|
- name: Bake ${{ matrix.base_image }} containers
|
||||||
|
uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
|
||||||
|
env:
|
||||||
|
BASE_TAGS: "${{ env.BASE_TAGS }}"
|
||||||
|
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
|
||||||
|
SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
|
||||||
|
SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
|
||||||
|
CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
|
||||||
|
with:
|
||||||
|
pull: true
|
||||||
|
push: true
|
||||||
|
files: docker/docker-bake.hcl
|
||||||
|
targets: "${{ matrix.base_image }}-multi"
|
||||||
|
set: |
|
||||||
|
*.cache-from=${{ env.BAKE_CACHE_FROM }}
|
||||||
|
*.cache-to=${{ env.BAKE_CACHE_TO }}
|
||||||
|
|
||||||
|
|
||||||
|
# Extract the Alpine binaries from the containers
|
||||||
|
- name: Extract binaries
|
||||||
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/push
|
# Check which main tag we are going to build determined by github.ref_type
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
|
if [[ "${{ github.ref_type }}" == "tag" ]]; then
|
||||||
|
EXTRACT_TAG="latest"
|
||||||
|
elif [[ "${{ github.ref_type }}" == "branch" ]]; then
|
||||||
|
EXTRACT_TAG="testing"
|
||||||
|
fi
|
||||||
|
|
||||||
# Quay.io
|
# After each extraction the image is removed.
|
||||||
- name: Build Debian based images (quay.io)
|
# This is needed because using different platforms doesn't trigger a new pull/download
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Debian based images (quay.io)
|
# Extract amd64 binary
|
||||||
shell: bash
|
docker create --name amd64 --platform=linux/amd64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
env:
|
docker cp amd64:/vaultwarden vaultwarden-amd64
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
docker rm --force amd64
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
run: |
|
|
||||||
./hooks/push
|
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
# Alpine
|
# Extract arm64 binary
|
||||||
|
docker create --name arm64 --platform=linux/arm64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
|
docker cp arm64:/vaultwarden vaultwarden-arm64
|
||||||
|
docker rm --force arm64
|
||||||
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
|
|
||||||
# Docker Hub
|
# Extract armv7 binary
|
||||||
- name: Build Alpine based images (docker.io)
|
docker create --name armv7 --platform=linux/arm/v7 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
shell: bash
|
docker cp armv7:/vaultwarden vaultwarden-armv7
|
||||||
env:
|
docker rm --force armv7
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
|
||||||
run: |
|
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Alpine based images (docker.io)
|
# Extract armv6 binary
|
||||||
shell: bash
|
docker create --name armv6 --platform=linux/arm/v6 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
env:
|
docker cp armv6:/vaultwarden vaultwarden-armv6
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
docker rm --force armv6
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
run: |
|
|
||||||
./hooks/push
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
# GitHub Container Registry
|
# Upload artifacts to Github Actions
|
||||||
- name: Build Alpine based images (ghcr.io)
|
- name: "Upload amd64 artifact"
|
||||||
shell: bash
|
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||||
env:
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
with:
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
|
||||||
run: |
|
path: vaultwarden-amd64
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Alpine based images (ghcr.io)
|
- name: "Upload arm64 artifact"
|
||||||
shell: bash
|
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||||
env:
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
with:
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
|
||||||
run: |
|
path: vaultwarden-arm64
|
||||||
./hooks/push
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
# Quay.io
|
- name: "Upload armv7 artifact"
|
||||||
- name: Build Alpine based images (quay.io)
|
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||||
shell: bash
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
env:
|
with:
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
path: vaultwarden-armv7
|
||||||
run: |
|
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Alpine based images (quay.io)
|
- name: "Upload armv6 artifact"
|
||||||
shell: bash
|
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||||
env:
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
with:
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
|
||||||
run: |
|
path: vaultwarden-armv6
|
||||||
./hooks/push
|
# End Upload artifacts to Github Actions
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
25
.github/workflows/releasecache-cleanup.yml
vendored
Normal file
25
.github/workflows/releasecache-cleanup.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
manual_trigger:
|
||||||
|
description: "Manual trigger buildcache cleanup"
|
||||||
|
required: false
|
||||||
|
default: ""
|
||||||
|
|
||||||
|
schedule:
|
||||||
|
- cron: '0 1 * * FRI'
|
||||||
|
|
||||||
|
name: Cleanup
|
||||||
|
jobs:
|
||||||
|
releasecache-cleanup:
|
||||||
|
name: Releasecache Cleanup
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
timeout-minutes: 30
|
||||||
|
steps:
|
||||||
|
- name: Delete vaultwarden-buildcache containers
|
||||||
|
uses: actions/delete-package-versions@0d39a63126868f5eefaa47169615edd3c0f61e20 # v4.1.1
|
||||||
|
with:
|
||||||
|
package-name: 'vaultwarden-buildcache'
|
||||||
|
package-type: 'container'
|
||||||
|
min-versions-to-keep: 0
|
||||||
|
delete-only-untagged-versions: 'false'
|
42
.github/workflows/trivy.yml
vendored
Normal file
42
.github/workflows/trivy.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
name: trivy
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
pull_request:
|
||||||
|
branches: [ "main" ]
|
||||||
|
schedule:
|
||||||
|
- cron: '00 12 * * *'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
trivy-scan:
|
||||||
|
name: Check
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
timeout-minutes: 30
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
security-events: write
|
||||||
|
actions: read
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
|
||||||
|
|
||||||
|
- name: Run Trivy vulnerability scanner
|
||||||
|
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
|
||||||
|
with:
|
||||||
|
scan-type: repo
|
||||||
|
ignore-unfixed: true
|
||||||
|
format: sarif
|
||||||
|
output: trivy-results.sarif
|
||||||
|
severity: CRITICAL,HIGH
|
||||||
|
|
||||||
|
- name: Upload Trivy scan results to GitHub Security tab
|
||||||
|
uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
|
||||||
|
with:
|
||||||
|
sarif_file: 'trivy-results.sarif'
|
@@ -1,10 +1,12 @@
|
|||||||
ignored:
|
ignored:
|
||||||
|
# To prevent issues and make clear some images only work on linux/amd64, we ignore this
|
||||||
|
- DL3029
|
||||||
# disable explicit version for apt install
|
# disable explicit version for apt install
|
||||||
- DL3008
|
- DL3008
|
||||||
# disable explicit version for apk install
|
# disable explicit version for apk install
|
||||||
- DL3018
|
- DL3018
|
||||||
# disable check for consecutive `RUN` instructions
|
# Ignore shellcheck info message
|
||||||
- DL3059
|
- SC1091
|
||||||
trustedRegistries:
|
trustedRegistries:
|
||||||
- docker.io
|
- docker.io
|
||||||
- ghcr.io
|
- ghcr.io
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
rev: v4.4.0
|
rev: v4.5.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: check-yaml
|
- id: check-yaml
|
||||||
- id: check-json
|
- id: check-json
|
||||||
|
1643
Cargo.lock
generated
1643
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
115
Cargo.toml
115
Cargo.toml
@@ -3,7 +3,7 @@ name = "vaultwarden"
|
|||||||
version = "1.0.0"
|
version = "1.0.0"
|
||||||
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.68.2"
|
rust-version = "1.73.0"
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
repository = "https://github.com/dani-garcia/vaultwarden"
|
repository = "https://github.com/dani-garcia/vaultwarden"
|
||||||
@@ -40,71 +40,71 @@ syslog = "6.1.0"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Logging
|
# Logging
|
||||||
log = "0.4.19"
|
log = "0.4.20"
|
||||||
fern = { version = "0.6.2", features = ["syslog-6"] }
|
fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
|
||||||
tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
||||||
|
|
||||||
# A `dotenv` implementation for Rust
|
# A `dotenv` implementation for Rust
|
||||||
dotenvy = { version = "0.15.7", default-features = false }
|
dotenvy = { version = "0.15.7", default-features = false }
|
||||||
|
|
||||||
# Lazy initialization
|
# Lazy initialization
|
||||||
once_cell = "1.18.0"
|
once_cell = "1.19.0"
|
||||||
|
|
||||||
# Numerical libraries
|
# Numerical libraries
|
||||||
num-traits = "0.2.15"
|
num-traits = "0.2.17"
|
||||||
num-derive = "0.4.0"
|
num-derive = "0.4.1"
|
||||||
|
bigdecimal = "0.4.2"
|
||||||
|
|
||||||
# Web framework
|
# Web framework
|
||||||
rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
|
rocket = { version = "0.5.0", features = ["tls", "json"], default-features = false }
|
||||||
# rocket_ws = { version ="0.1.0-rc.3" }
|
rocket_ws = { version ="0.1.0" }
|
||||||
rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch
|
|
||||||
|
|
||||||
# WebSockets libraries
|
# WebSockets libraries
|
||||||
tokio-tungstenite = "0.19.0"
|
tokio-tungstenite = "0.20.1"
|
||||||
rmpv = "1.0.0" # MessagePack library
|
rmpv = "1.0.1" # MessagePack library
|
||||||
|
|
||||||
# Concurrent HashMap used for WebSocket messaging and favicons
|
# Concurrent HashMap used for WebSocket messaging and favicons
|
||||||
dashmap = "5.4.0"
|
dashmap = "5.5.3"
|
||||||
|
|
||||||
# Async futures
|
# Async futures
|
||||||
futures = "0.3.28"
|
futures = "0.3.30"
|
||||||
tokio = { version = "1.29.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
|
tokio = { version = "1.35.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
|
||||||
|
|
||||||
# A generic serialization/deserialization framework
|
# A generic serialization/deserialization framework
|
||||||
serde = { version = "1.0.166", features = ["derive"] }
|
serde = { version = "1.0.195", features = ["derive"] }
|
||||||
serde_json = "1.0.99"
|
serde_json = "1.0.111"
|
||||||
|
|
||||||
# A safe, extensible ORM and Query builder
|
# A safe, extensible ORM and Query builder
|
||||||
diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
|
diesel = { version = "2.1.4", features = ["chrono", "r2d2", "numeric"] }
|
||||||
diesel_migrations = "2.1.0"
|
diesel_migrations = "2.1.0"
|
||||||
diesel_logger = { version = "0.3.0", optional = true }
|
diesel_logger = { version = "0.3.0", optional = true }
|
||||||
|
|
||||||
# Bundled/Static SQLite
|
# Bundled/Static SQLite
|
||||||
libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
|
libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }
|
||||||
|
|
||||||
# Crypto-related libraries
|
# Crypto-related libraries
|
||||||
rand = { version = "0.8.5", features = ["small_rng"] }
|
rand = { version = "0.8.5", features = ["small_rng"] }
|
||||||
ring = "0.16.20"
|
ring = "0.17.7"
|
||||||
|
|
||||||
# UUID generation
|
# UUID generation
|
||||||
uuid = { version = "1.4.0", features = ["v4"] }
|
uuid = { version = "1.7.0", features = ["v4"] }
|
||||||
|
|
||||||
# Date and time libraries
|
# Date and time libraries
|
||||||
chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
|
chrono = { version = "0.4.33", features = ["clock", "serde"], default-features = false }
|
||||||
chrono-tz = "0.8.3"
|
chrono-tz = "0.8.5"
|
||||||
time = "0.3.22"
|
time = "0.3.31"
|
||||||
|
|
||||||
# Job scheduler
|
# Job scheduler
|
||||||
job_scheduler_ng = "2.0.4"
|
job_scheduler_ng = "2.0.4"
|
||||||
|
|
||||||
# Data encoding library Hex/Base32/Base64
|
# Data encoding library Hex/Base32/Base64
|
||||||
data-encoding = "2.4.0"
|
data-encoding = "2.5.0"
|
||||||
|
|
||||||
# JWT library
|
# JWT library
|
||||||
jsonwebtoken = "8.3.0"
|
jsonwebtoken = "9.2.0"
|
||||||
|
|
||||||
# TOTP library
|
# TOTP library
|
||||||
totp-lite = "2.0.0"
|
totp-lite = "2.0.1"
|
||||||
|
|
||||||
# Yubico Library
|
# Yubico Library
|
||||||
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
|
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
|
||||||
@@ -113,71 +113,80 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
|
|||||||
webauthn-rs = "0.3.2"
|
webauthn-rs = "0.3.2"
|
||||||
|
|
||||||
# Handling of URL's for WebAuthn and favicons
|
# Handling of URL's for WebAuthn and favicons
|
||||||
url = "2.4.0"
|
url = "2.5.0"
|
||||||
|
|
||||||
# Email libraries
|
# Email libraries
|
||||||
lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
lettre = { version = "0.11.3", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
||||||
percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
|
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
|
||||||
email_address = "0.2.4"
|
email_address = "0.2.4"
|
||||||
|
|
||||||
# HTML Template library
|
# HTML Template library
|
||||||
handlebars = { version = "4.3.7", features = ["dir_source"] }
|
handlebars = { version = "5.1.1", features = ["dir_source"] }
|
||||||
|
|
||||||
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
||||||
reqwest = { version = "0.11.18", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
|
reqwest = { version = "0.11.23", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
|
||||||
|
|
||||||
# Favicon extraction libraries
|
# Favicon extraction libraries
|
||||||
html5gum = "0.5.3"
|
html5gum = "0.5.7"
|
||||||
regex = { version = "1.8.4", features = ["std", "perf", "unicode-perl"], default-features = false }
|
regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
|
||||||
data-url = "0.3.0"
|
data-url = "0.3.1"
|
||||||
bytes = "1.4.0"
|
bytes = "1.5.0"
|
||||||
|
|
||||||
# Cache function results (Used for version check and favicon fetching)
|
# Cache function results (Used for version check and favicon fetching)
|
||||||
cached = "0.44.0"
|
cached = { version = "0.48.1", features = ["async"] }
|
||||||
|
|
||||||
# Used for custom short lived cookie jar during favicon extraction
|
# Used for custom short lived cookie jar during favicon extraction
|
||||||
cookie = "0.16.2"
|
cookie = "0.16.2"
|
||||||
cookie_store = "0.19.1"
|
cookie_store = "0.19.1"
|
||||||
|
|
||||||
# Used by U2F, JWT and PostgreSQL
|
# Used by U2F, JWT and PostgreSQL
|
||||||
openssl = "0.10.55"
|
openssl = "0.10.63"
|
||||||
|
|
||||||
# CLI argument parsing
|
# CLI argument parsing
|
||||||
pico-args = "0.5.0"
|
pico-args = "0.5.0"
|
||||||
|
|
||||||
# Macro ident concatenation
|
# Macro ident concatenation
|
||||||
paste = "1.0.13"
|
paste = "1.0.14"
|
||||||
governor = "0.5.1"
|
governor = "0.6.0"
|
||||||
|
|
||||||
# Check client versions for specific features.
|
# Check client versions for specific features.
|
||||||
semver = "1.0.17"
|
semver = "1.0.21"
|
||||||
|
|
||||||
# Allow overriding the default memory allocator
|
# Allow overriding the default memory allocator
|
||||||
# Mainly used for the musl builds, since the default musl malloc is very slow
|
# Mainly used for the musl builds, since the default musl malloc is very slow
|
||||||
mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
|
mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
|
||||||
which = "4.4.0"
|
which = "6.0.0"
|
||||||
|
|
||||||
# Argon2 library with support for the PHC format
|
# Argon2 library with support for the PHC format
|
||||||
argon2 = "0.5.0"
|
argon2 = "0.5.3"
|
||||||
|
|
||||||
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
|
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
|
||||||
rpassword = "7.2.0"
|
rpassword = "7.3.1"
|
||||||
|
|
||||||
[patch.crates-io]
|
|
||||||
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
|
|
||||||
# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
|
|
||||||
|
|
||||||
# Strip debuginfo from the release builds
|
# Strip debuginfo from the release builds
|
||||||
# Also enable thin LTO for some optimizations
|
# The symbols are the provide better panic traces
|
||||||
|
# Also enable fat LTO and use 1 codegen unit for optimizations
|
||||||
[profile.release]
|
[profile.release]
|
||||||
strip = "debuginfo"
|
strip = "debuginfo"
|
||||||
lto = "thin"
|
lto = "fat"
|
||||||
|
codegen-units = 1
|
||||||
|
|
||||||
|
|
||||||
|
# A little bit of a speedup
|
||||||
|
[profile.dev]
|
||||||
|
split-debuginfo = "unpacked"
|
||||||
|
|
||||||
# Always build argon2 using opt-level 3
|
# Always build argon2 using opt-level 3
|
||||||
# This is a huge speed improvement during testing
|
# This is a huge speed improvement during testing
|
||||||
[profile.dev.package.argon2]
|
[profile.dev.package.argon2]
|
||||||
opt-level = 3
|
opt-level = 3
|
||||||
|
|
||||||
# A little bit of a speedup
|
# Optimize for size
|
||||||
[profile.dev]
|
[profile.release-micro]
|
||||||
split-debuginfo = "unpacked"
|
inherits = "release"
|
||||||
|
opt-level = "z"
|
||||||
|
strip = "symbols"
|
||||||
|
lto = "fat"
|
||||||
|
codegen-units = 1
|
||||||
|
panic = "abort"
|
||||||
|
@@ -1 +1 @@
|
|||||||
docker/amd64/Dockerfile
|
docker/Dockerfile.debian
|
@@ -42,7 +42,7 @@ docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p
|
|||||||
```
|
```
|
||||||
This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
|
This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
|
||||||
|
|
||||||
**IMPORTANT**: Most modern web browsers, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
|
**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
|
||||||
|
|
||||||
This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
|
This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
|
||||||
|
|
||||||
@@ -92,4 +92,11 @@ Thanks for your contribution to the project!
|
|||||||
</a>
|
</a>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td align="center">
|
||||||
|
<a href="https://github.com/IQ333777" style="width: 75px">
|
||||||
|
<sub><b>IQ333777</b></sub>
|
||||||
|
</a>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
|
9
build.rs
9
build.rs
@@ -17,8 +17,15 @@ fn main() {
|
|||||||
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
|
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Rerun when these paths are changed.
|
||||||
|
// Someone could have checked-out a tag or specific commit, but no other files changed.
|
||||||
|
println!("cargo:rerun-if-changed=.git");
|
||||||
|
println!("cargo:rerun-if-changed=.git/HEAD");
|
||||||
|
println!("cargo:rerun-if-changed=.git/index");
|
||||||
|
println!("cargo:rerun-if-changed=.git/refs/tags");
|
||||||
|
|
||||||
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
||||||
compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
|
compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");
|
||||||
|
|
||||||
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
||||||
// If neither exist, read from git.
|
// If neither exist, read from git.
|
||||||
|
28
docker/DockerSettings.yaml
Normal file
28
docker/DockerSettings.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
vault_version: "v2024.1.2"
|
||||||
|
vault_image_digest: "sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b"
|
||||||
|
# Cross Compile Docker Helper Scripts v1.3.0
|
||||||
|
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
|
||||||
|
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
|
||||||
|
rust_version: 1.75.0 # Rust version to be used
|
||||||
|
debian_version: bookworm # Debian release name to be used
|
||||||
|
alpine_version: 3.19 # Alpine version to be used
|
||||||
|
# For which platforms/architectures will we try to build images
|
||||||
|
platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
|
||||||
|
# Determine the build images per OS/Arch
|
||||||
|
build_stage_image:
|
||||||
|
debian:
|
||||||
|
image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}"
|
||||||
|
platform: "$BUILDPLATFORM"
|
||||||
|
alpine:
|
||||||
|
image: "build_${TARGETARCH}${TARGETVARIANT}"
|
||||||
|
platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
|
||||||
|
arch_image:
|
||||||
|
amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
|
||||||
|
arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"
|
||||||
|
armv7: "ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}"
|
||||||
|
armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}"
|
||||||
|
# The final image which will be used to distribute the container images
|
||||||
|
runtime_stage_image:
|
||||||
|
debian: "docker.io/library/debian:{{debian_version}}-slim"
|
||||||
|
alpine: "docker.io/library/alpine:{{alpine_version}}"
|
161
docker/Dockerfile.alpine
Normal file
161
docker/Dockerfile.alpine
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
|
# This file was generated using a Jinja2 template.
|
||||||
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
||||||
|
|
||||||
|
# Using multistage build:
|
||||||
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
|
|
||||||
|
####################### VAULT BUILD IMAGE #######################
|
||||||
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
|
# Using the digest instead of the tag name provides better security,
|
||||||
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
|
# be changed to point to a malicious image.
|
||||||
|
#
|
||||||
|
# To verify the current digest for a given tag name:
|
||||||
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
|
# - From the command line:
|
||||||
|
# $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2
|
||||||
|
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2
|
||||||
|
# [docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b]
|
||||||
|
#
|
||||||
|
# - Conversely, to get the tag name from the digest:
|
||||||
|
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b
|
||||||
|
# [docker.io/vaultwarden/web-vault:v2024.1.2]
|
||||||
|
#
|
||||||
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b as vault
|
||||||
|
|
||||||
|
########################## ALPINE BUILD IMAGES ##########################
|
||||||
|
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
||||||
|
## And for Alpine we define all build images here, they will only be loaded when actually used
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.75.0 as build_amd64
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.75.0 as build_arm64
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.75.0 as build_armv7
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.75.0 as build_armv6
|
||||||
|
|
||||||
|
########################## BUILD IMAGE ##########################
|
||||||
|
# hadolint ignore=DL3006
|
||||||
|
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
|
||||||
|
ARG TARGETARCH
|
||||||
|
ARG TARGETVARIANT
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
|
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive \
|
||||||
|
LANG=C.UTF-8 \
|
||||||
|
TZ=UTC \
|
||||||
|
TERM=xterm-256color \
|
||||||
|
CARGO_HOME="/root/.cargo" \
|
||||||
|
USER="root" \
|
||||||
|
# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
|
||||||
|
# Debian Bookworm already contains libpq v15
|
||||||
|
PQ_LIB_DIR="/usr/local/musl/pq15/lib"
|
||||||
|
|
||||||
|
|
||||||
|
# Create CARGO_HOME folder and don't download rust docs
|
||||||
|
RUN mkdir -pv "${CARGO_HOME}" \
|
||||||
|
&& rustup set profile minimal
|
||||||
|
|
||||||
|
# Creates a dummy project used to grab dependencies
|
||||||
|
RUN USER=root cargo new --bin /app
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Shared variables across Debian and Alpine
|
||||||
|
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
|
||||||
|
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
|
||||||
|
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
|
||||||
|
# Output the current contents of the file
|
||||||
|
cat /env-cargo
|
||||||
|
|
||||||
|
# Enable MiMalloc to improve performance on Alpine builds
|
||||||
|
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
||||||
|
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
rustup target add "${CARGO_TARGET}"
|
||||||
|
|
||||||
|
ARG CARGO_PROFILE=release
|
||||||
|
ARG VW_VERSION
|
||||||
|
|
||||||
|
# Copies over *only* your manifests and build files
|
||||||
|
COPY ./Cargo.* ./
|
||||||
|
COPY ./rust-toolchain.toml ./rust-toolchain.toml
|
||||||
|
COPY ./build.rs ./build.rs
|
||||||
|
|
||||||
|
# Builds your dependencies and removes the
|
||||||
|
# dummy project, except the target folder
|
||||||
|
# This folder contains the compiled dependencies
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
find . -not -path "./target*" -delete
|
||||||
|
|
||||||
|
# Copies the complete project
|
||||||
|
# To avoid copying unneeded files, use .dockerignore
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Builds again, this time it will be the actual source files being build
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
# Make sure that we actually build the project by updating the src/main.rs timestamp
|
||||||
|
# Also do this for build.rs to ensure the version is rechecked
|
||||||
|
touch build.rs src/main.rs && \
|
||||||
|
# Create a symlink to the binary target folder to easy copy the binary in the final stage
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
|
||||||
|
else \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
######################## RUNTIME IMAGE ########################
|
||||||
|
# Create a new stage with a minimal image
|
||||||
|
# because we already have a binary built
|
||||||
|
#
|
||||||
|
# To build these images you need to have qemu binfmt support.
|
||||||
|
# See the following pages to help install these tools locally
|
||||||
|
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
|
||||||
|
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
|
||||||
|
#
|
||||||
|
# Or use a Docker image which modifies your host system to support this.
|
||||||
|
# The GitHub Actions Workflow uses the same image as used below.
|
||||||
|
# See: https://github.com/tonistiigi/binfmt
|
||||||
|
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
|
||||||
|
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
|
||||||
|
#
|
||||||
|
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
|
||||||
|
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.19
|
||||||
|
|
||||||
|
ENV ROCKET_PROFILE="release" \
|
||||||
|
ROCKET_ADDRESS=0.0.0.0 \
|
||||||
|
ROCKET_PORT=80 \
|
||||||
|
SSL_CERT_DIR=/etc/ssl/certs
|
||||||
|
|
||||||
|
# Create data folder and Install needed libraries
|
||||||
|
RUN mkdir /data && \
|
||||||
|
apk --no-cache add \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
openssl \
|
||||||
|
tzdata
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
EXPOSE 80
|
||||||
|
EXPOSE 3012
|
||||||
|
|
||||||
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
|
# and the binary from the "build" stage to the current stage
|
||||||
|
WORKDIR /
|
||||||
|
|
||||||
|
COPY docker/healthcheck.sh /healthcheck.sh
|
||||||
|
COPY docker/start.sh /start.sh
|
||||||
|
|
||||||
|
COPY --from=vault /web-vault ./web-vault
|
||||||
|
COPY --from=build /app/target/final/vaultwarden .
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||||
|
|
||||||
|
CMD ["/start.sh"]
|
@@ -1,34 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
# The cross-built images have the build arch (`amd64`) embedded in the image
|
|
||||||
# manifest, rather than the target arch. For example:
|
|
||||||
#
|
|
||||||
# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
|
|
||||||
# amd64
|
|
||||||
#
|
|
||||||
# Recent versions of Docker have started printing a warning when the image's
|
|
||||||
# claimed arch doesn't match the host arch. For example:
|
|
||||||
#
|
|
||||||
# WARNING: The requested image's platform (linux/amd64) does not match the
|
|
||||||
# detected host platform (linux/arm/v7) and no specific platform was requested
|
|
||||||
#
|
|
||||||
# The image still works fine, but the spurious warning creates confusion.
|
|
||||||
#
|
|
||||||
# Docker doesn't seem to provide a way to directly set the arch of an image
|
|
||||||
# at build time. To resolve the build vs. target arch discrepancy, we use
|
|
||||||
# Docker Buildx to build a new set of images with the correct target arch.
|
|
||||||
#
|
|
||||||
# Docker Buildx uses this Dockerfile to build an image for each requested
|
|
||||||
# platform. Since the Dockerfile basically consists of a single `FROM`
|
|
||||||
# instruction, we're effectively telling Buildx to build a platform-specific
|
|
||||||
# image by simply copying the existing cross-built image and setting the
|
|
||||||
# correct target arch as a side effect.
|
|
||||||
#
|
|
||||||
# References:
|
|
||||||
#
|
|
||||||
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
|
|
||||||
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
|
|
||||||
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
|
|
||||||
#
|
|
||||||
ARG LOCAL_REPO
|
|
||||||
ARG DOCKER_TAG
|
|
||||||
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
|
|
196
docker/Dockerfile.debian
Normal file
196
docker/Dockerfile.debian
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
|
# This file was generated using a Jinja2 template.
|
||||||
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
||||||
|
|
||||||
|
# Using multistage build:
|
||||||
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
|
|
||||||
|
####################### VAULT BUILD IMAGE #######################
|
||||||
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
|
# Using the digest instead of the tag name provides better security,
|
||||||
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
|
# be changed to point to a malicious image.
|
||||||
|
#
|
||||||
|
# To verify the current digest for a given tag name:
|
||||||
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
|
# - From the command line:
|
||||||
|
# $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2
|
||||||
|
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2
|
||||||
|
# [docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b]
|
||||||
|
#
|
||||||
|
# - Conversely, to get the tag name from the digest:
|
||||||
|
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b
|
||||||
|
# [docker.io/vaultwarden/web-vault:v2024.1.2]
|
||||||
|
#
|
||||||
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b as vault
|
||||||
|
|
||||||
|
########################## Cross Compile Docker Helper Scripts ##########################
|
||||||
|
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
|
||||||
|
## And these bash scripts do not have any significant difference if at all
|
||||||
|
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
|
||||||
|
|
||||||
|
########################## BUILD IMAGE ##########################
|
||||||
|
# hadolint ignore=DL3006
|
||||||
|
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.75.0-slim-bookworm as build
|
||||||
|
COPY --from=xx / /
|
||||||
|
ARG TARGETARCH
|
||||||
|
ARG TARGETVARIANT
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
|
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive \
|
||||||
|
LANG=C.UTF-8 \
|
||||||
|
TZ=UTC \
|
||||||
|
TERM=xterm-256color \
|
||||||
|
CARGO_HOME="/root/.cargo" \
|
||||||
|
USER="root"
|
||||||
|
|
||||||
|
# Install clang to get `xx-cargo` working
|
||||||
|
# Install pkg-config to allow amd64 builds to find all libraries
|
||||||
|
# Install git so build.rs can determine the correct version
|
||||||
|
# Install the libc cross packages based upon the debian-arch
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
clang \
|
||||||
|
pkg-config \
|
||||||
|
git \
|
||||||
|
"libc6-$(xx-info debian-arch)-cross" \
|
||||||
|
"libc6-dev-$(xx-info debian-arch)-cross" \
|
||||||
|
"linux-libc-dev-$(xx-info debian-arch)-cross" && \
|
||||||
|
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
|
||||||
|
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
|
||||||
|
|
||||||
|
RUN xx-apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
gcc \
|
||||||
|
libmariadb3 \
|
||||||
|
libpq-dev \
|
||||||
|
libpq5 \
|
||||||
|
libssl-dev \
|
||||||
|
zlib1g-dev && \
|
||||||
|
# Force install arch dependend mariadb dev packages
|
||||||
|
# Installing them the normal way breaks several other packages (again)
|
||||||
|
apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
|
||||||
|
dpkg --force-all -i ./libmariadb-dev*.deb
|
||||||
|
|
||||||
|
# Create CARGO_HOME folder and don't download rust docs
|
||||||
|
RUN mkdir -pv "${CARGO_HOME}" \
|
||||||
|
&& rustup set profile minimal
|
||||||
|
|
||||||
|
# Creates a dummy project used to grab dependencies
|
||||||
|
RUN USER=root cargo new --bin /app
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Environment variables for cargo across Debian and Alpine
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
if xx-info is-cross ; then \
|
||||||
|
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
|
||||||
|
# Because of this we generate the needed environment variables here which we can load in the needed steps.
|
||||||
|
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||||
|
echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||||
|
echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
|
||||||
|
echo "export CROSS_COMPILE=1" >> /env-cargo && \
|
||||||
|
echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
|
||||||
|
echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
|
||||||
|
fi && \
|
||||||
|
# Output the current contents of the file
|
||||||
|
cat /env-cargo
|
||||||
|
|
||||||
|
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
||||||
|
ARG DB=sqlite,mysql,postgresql
|
||||||
|
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
rustup target add "${CARGO_TARGET}"
|
||||||
|
|
||||||
|
ARG CARGO_PROFILE=release
|
||||||
|
ARG VW_VERSION
|
||||||
|
|
||||||
|
# Copies over *only* your manifests and build files
|
||||||
|
COPY ./Cargo.* ./
|
||||||
|
COPY ./rust-toolchain.toml ./rust-toolchain.toml
|
||||||
|
COPY ./build.rs ./build.rs
|
||||||
|
|
||||||
|
# Builds your dependencies and removes the
|
||||||
|
# dummy project, except the target folder
|
||||||
|
# This folder contains the compiled dependencies
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
find . -not -path "./target*" -delete
|
||||||
|
|
||||||
|
# Copies the complete project
|
||||||
|
# To avoid copying unneeded files, use .dockerignore
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Builds again, this time it will be the actual source files being build
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
# Make sure that we actually build the project by updating the src/main.rs timestamp
|
||||||
|
# Also do this for build.rs to ensure the version is rechecked
|
||||||
|
touch build.rs src/main.rs && \
|
||||||
|
# Create a symlink to the binary target folder to easy copy the binary in the final stage
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
|
||||||
|
else \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
######################## RUNTIME IMAGE ########################
|
||||||
|
# Create a new stage with a minimal image
|
||||||
|
# because we already have a binary built
|
||||||
|
#
|
||||||
|
# To build these images you need to have qemu binfmt support.
|
||||||
|
# See the following pages to help install these tools locally
|
||||||
|
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
|
||||||
|
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
|
||||||
|
#
|
||||||
|
# Or use a Docker image which modifies your host system to support this.
|
||||||
|
# The GitHub Actions Workflow uses the same image as used below.
|
||||||
|
# See: https://github.com/tonistiigi/binfmt
|
||||||
|
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
|
||||||
|
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
|
||||||
|
#
|
||||||
|
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
|
||||||
|
FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim
|
||||||
|
|
||||||
|
ENV ROCKET_PROFILE="release" \
|
||||||
|
ROCKET_ADDRESS=0.0.0.0 \
|
||||||
|
ROCKET_PORT=80 \
|
||||||
|
DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
# Create data folder and Install needed libraries
|
||||||
|
RUN mkdir /data && \
|
||||||
|
apt-get update && apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libmariadb-dev-compat \
|
||||||
|
libpq5 \
|
||||||
|
openssl && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
EXPOSE 80
|
||||||
|
EXPOSE 3012
|
||||||
|
|
||||||
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
|
# and the binary from the "build" stage to the current stage
|
||||||
|
WORKDIR /
|
||||||
|
|
||||||
|
COPY docker/healthcheck.sh /healthcheck.sh
|
||||||
|
COPY docker/start.sh /start.sh
|
||||||
|
|
||||||
|
COPY --from=vault /web-vault ./web-vault
|
||||||
|
COPY --from=build /app/target/final/vaultwarden .
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||||
|
|
||||||
|
CMD ["/start.sh"]
|
@@ -1,68 +1,14 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
{% set rust_version = "1.70.0" %}
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
docker/Dockerfile.j2 (the single Jinja2 template is now driven by a `base` parameter, "debian" or "alpine", instead of per-architecture `target_file` variables)

-{% set debian_version = "bullseye" %}
-{% set alpine_version = "3.17" %}
-{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
-{% if "alpine" in target_file %}
-{% if "amd64" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
-{% set package_arch_target = "x86_64-unknown-linux-musl" %}
-{% elif "armv7" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
-{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
-{% elif "armv6" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
-{% set package_arch_target = "arm-unknown-linux-musleabi" %}
-{% elif "arm64" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
-{% set package_arch_target = "aarch64-unknown-linux-musl" %}
-{% endif %}
-{% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
-{% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
-{% set package_arch_name = "arm64" %}
-{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
-{% set package_cross_compiler = "aarch64-linux-gnu" %}
-{% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
-{% set package_arch_name = "armel" %}
-{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
-{% set package_cross_compiler = "arm-linux-gnueabi" %}
-{% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
-{% set package_arch_name = "armhf" %}
-{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
-{% set package_cross_compiler = "arm-linux-gnueabihf" %}
-{% endif %}
-{% if package_arch_name is defined %}
-{% set package_arch_prefix = ":" + package_arch_name %}
-{% else %}
-{% set package_arch_prefix = "" %}
-{% endif %}
-{% if package_arch_target is defined %}
-{% set package_arch_target_param = " --target=" + package_arch_target %}
-{% else %}
-{% set package_arch_target_param = "" %}
-{% endif %}
-{% if "buildkit" in target_file %}
-{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
-{% else %}
-{% set mount_rust_cache = "" %}
-{% endif %}
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "v2023.5.0" %}
-{% set vault_image_digest = "sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -80,10 +26,33 @@
 # $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
 # [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault

-########################## BUILD IMAGE ##########################
-FROM {{ build_stage_base_image }} as build
+{% if base == "debian" %}
+########################## Cross Compile Docker Helper Scripts ##########################
+## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
+## And these bash scripts do not have any significant difference if at all
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
+{% elif base == "alpine" %}
+########################## ALPINE BUILD IMAGES ##########################
+## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
+## And for Alpine we define all build images here, they will only be loaded when actually used
+{% for arch in build_stage_image[base].arch_image %}
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
+{% endfor %}
+{% endif %}
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
+{% if base == "debian" %}
+COPY --from=xx / /
+{% endif %}
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -92,134 +61,163 @@ ENV DEBIAN_FRONTEND=noninteractive \
     TERM=xterm-256color \
     CARGO_HOME="/root/.cargo" \
     USER="root"
+{%- if base == "alpine" %} \
+    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
+    # Debian Bookworm already contains libpq v15
+    PQ_LIB_DIR="/usr/local/musl/pq15/lib"
+{% endif %}
+
+{% if base == "debian" %}
+
+# Install clang to get `xx-cargo` working
+# Install pkg-config to allow amd64 builds to find all libraries
+# Install git so build.rs can determine the correct version
+# Install the libc cross packages based upon the debian-arch
+RUN apt-get update && \
+    apt-get install -y \
+        --no-install-recommends \
+        clang \
+        pkg-config \
+        git \
+        "libc6-$(xx-info debian-arch)-cross" \
+        "libc6-dev-$(xx-info debian-arch)-cross" \
+        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
+
+RUN xx-apt-get install -y \
+        --no-install-recommends \
+        gcc \
+        libmariadb3 \
+        libpq-dev \
+        libpq5 \
+        libssl-dev \
+        zlib1g-dev && \
+    # Force install arch dependend mariadb dev packages
+    # Installing them the normal way breaks several other packages (again)
+    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
+    dpkg --force-all -i ./libmariadb-dev*.deb
+{% endif %}

 # Create CARGO_HOME folder and don't download rust docs
-RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
+RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

-{% if "alpine" in target_file %}
-{% if "armv6" in target_file %}
-# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
-ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
-{% endif %}
-{% elif "arm" in target_file %}
-# Install build dependencies for the {{ package_arch_name }} architecture
-RUN dpkg --add-architecture {{ package_arch_name }} \
-    && apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-{{ package_cross_compiler }} \
-        libc6-dev{{ package_arch_prefix }} \
-        libmariadb-dev{{ package_arch_prefix }} \
-        libmariadb-dev-compat{{ package_arch_prefix }} \
-        libmariadb3{{ package_arch_prefix }} \
-        libpq-dev{{ package_arch_prefix }} \
-        libpq5{{ package_arch_prefix }} \
-        libssl-dev{{ package_arch_prefix }} \
-        #
-        # Make sure cargo has the right target config
-    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
-    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
-    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"
-
-# Set arm specific environment values
-ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
-    CROSS_COMPILE="1" \
-    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
-    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
-{% elif "amd64" in target_file %}
-# Install build dependencies
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libmariadb-dev \
-        libpq-dev
-{% endif %}
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-{% if package_arch_target is defined %}
-RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
-{% endif %}
+{% if base == "debian" %}
+# Environment variables for cargo across Debian and Alpine
+RUN source /env-cargo && \
+    if xx-info is-cross ; then \
+        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
+        # Because of this we generate the needed environment variables here which we can load in the needed steps.
+        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
+        echo "export CROSS_COMPILE=1" >> /env-cargo && \
+        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
+        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    fi && \
+    # Output the current contents of the file
+    cat /env-cargo

 # Configure the DB ARG as late as possible to not invalidate the cached layers above
-{% if "alpine" in target_file %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Shared variables across Debian and Alpine
+RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
+    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
+    # Output the current contents of the file
+    cat /env-cargo

 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-{% else %}
-ARG DB=sqlite,mysql,postgresql
 {% endif %}
+
+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+ARG CARGO_PROFILE=release
+ARG VW_VERSION
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain.toml ./rust-toolchain.toml
+COPY ./build.rs ./build.rs

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
-    && find . -not -path "./target*" -delete
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
+# Builds again, this time it will be the actual source files being build
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
+    # Create a symlink to the binary target folder to easy copy the binary in the final stage
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM {{ runtime_stage_base_image }}
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
-{%- if "alpine" in runtime_stage_base_image %} \
+{%- if base == "debian" %} \
+    DEBIAN_FRONTEND=noninteractive
+{% elif base == "alpine" %} \
     SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}

-{% if "amd64" not in target_file %}
-RUN [ "cross-build-start" ]
-{% endif %}
-
 # Create data folder and Install needed libraries
-RUN mkdir /data \
-{% if "alpine" in runtime_stage_base_image %}
-    && apk add --no-cache \
+RUN mkdir /data && \
+{% if base == "debian" %}
+    apt-get update && apt-get install -y \
+        --no-install-recommends \
+        ca-certificates \
+        curl \
+        libmariadb-dev-compat \
+        libpq5 \
+        openssl && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+{% elif base == "alpine" %}
+    apk --no-cache add \
         ca-certificates \
         curl \
         openssl \
         tzdata
-{% else %}
-    && apt-get update && apt-get install -y \
-        --no-install-recommends \
-        ca-certificates \
-        curl \
-        libmariadb-dev-compat \
-        libpq5 \
-        openssl \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-{% endif %}
-
-{% if "armv6" in target_file and "alpine" not in target_file %}
-# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
-# This symlink was there in the buster images, and for some reason this is needed.
-RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
-
-{% endif -%}
-
-{% if "amd64" not in target_file %}
-RUN [ "cross-build-end" ]
 {% endif %}

 VOLUME /data
@@ -229,16 +227,13 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY --from=vault /web-vault ./web-vault
-{% if package_arch_target is defined %}
-COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
-{% else %}
-COPY --from=build /app/target/release/vaultwarden .
-{% endif %}

 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh

+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .
+
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 CMD ["/start.sh"]
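The `/env-cargo` file written by the build steps above is how cross-compile settings travel between `RUN` layers: every later step begins with `source /env-cargo`. As a rough illustration only (not part of the diff), for a Debian `linux/arm64` build the file would contain something like the following, assuming `xx-info` resolves to `aarch64-linux-gnu`; the exact values come from the `echo` lines in the template.

```bash
# Illustrative /env-cargo for a linux/arm64 Debian cross build
export CARGO_TARGET=aarch64-unknown-linux-gnu
export CC_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-gcc
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
export PKG_CONFIG=/usr/bin/aarch64-linux-gnu-pkg-config
export CROSS_COMPILE=1
export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu
export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu
```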
Makefile (docker directory) — the per-architecture pattern rules are replaced by two direct render targets
@@ -1,15 +1,4 @@
-OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
-
-all: $(OBJECTS)
-
-%/Dockerfile: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.buildkit: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
+all:
+	./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
+	./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine
+.PHONY: all
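With the simplified Makefile, regenerating the two committed Dockerfiles from the template is one step. Usage sketch, assuming it is run from the directory containing `Dockerfile.j2`, the Makefile and the `render_template` helper:

```bash
cd docker
# Render Dockerfile.debian and Dockerfile.alpine from the single template
make
# Or render one variant directly, exactly as the Makefile recipe does
./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine
```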
docker/README.md (rewritten; the old two-line README, which only pointed at the arch identifiers used by the Docker official images, https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64, is replaced by the document below)

# Vaultwarden Container Building

To build and release new testing and stable releases of Vaultwarden we use `docker buildx bake`.<br>
This can be used locally by running the command yourself, but it is also used by GitHub Actions.

This makes it easier for us to test and maintain the different architectures we provide.<br>
We also have just two Dockerfiles, one for Debian and one for Alpine based images.<br>
With just these two files we can build both Debian and Alpine images for the following platforms:
- amd64 (linux/amd64)
- arm64 (linux/arm64)
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)

To build these containers you need to enable QEMU binfmt support so the build can run/emulate architectures that differ from your host.<br>
This ensures the container build process can run binaries from other architectures.<br>

**NOTE**: Run all the examples below from the root of the repo.<br>

## How to install QEMU binfmt support

This is different per host OS, but most support it in some way.<br>

### Ubuntu/Debian
```bash
apt install binfmt-support qemu-user-static
```

### Arch Linux (and others based upon it)
```bash
pacman -S qemu-user-static qemu-user-static-binfmt
```

### Fedora
```bash
dnf install qemu-user-static
```

### Others
There is also the option of using another Docker container to provide this support.
```bash
# To install and activate
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
```
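Whichever option you pick, it is worth verifying that the emulators are actually registered before starting a build. A quick check, assuming a working `docker buildx` setup on a Linux host:

```bash
# The default builder should now list non-native platforms such as linux/arm64
docker buildx ls
# The registered QEMU handlers also show up as binfmt_misc entries
ls /proc/sys/fs/binfmt_misc/ | grep qemu
```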

## Single architecture container building

You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>

```bash
# Default bake triggers a Debian build using the host's architecture
docker buildx bake --file docker/docker-bake.hcl

# Bake Debian ARM64 using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl debian-arm64

# Bake Alpine ARMv6 as a release build
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl alpine-armv6
```
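After a single-architecture bake finishes, the result ends up in your local image store; to see what was produced (tag names depend on `BASE_TAGS` and the chosen target, see the variables section below):

```bash
# List the freshly baked Vaultwarden images
docker images vaultwarden/server
```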

## Local Multi Architecture container building

Start with the initialization; this only needs to be done once.

```bash
# Create and use a new buildx builder instance which connects to the host network
docker buildx create --name vaultwarden --use --driver-opt network=host

# Validate it runs
docker buildx inspect --bootstrap

# Create a local container registry directly reachable on the localhost
docker run -d --name registry --network host registry:2
```

After that is done, you should be able to build and push to the local registry.<br>
Use the following command with the modified variables to bake the Alpine images.<br>
Replace `alpine` with `debian` if you want to build the Debian multi arch images.

```bash
# Start a buildx bake using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
docker buildx bake --file docker/docker-bake.hcl alpine-multi
```
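Once the multi-arch bake has pushed to the local registry, you can confirm that one tag now carries all the requested platforms. A verification sketch, assuming the default `testing` tag was used:

```bash
# Show the manifest list and the platforms it contains
docker buildx imagetools inspect localhost:5000/vaultwarden/server:testing
```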

## Using the `bake.sh` script

To make it a bit easier to trigger a build, there is also a `bake.sh` script.<br>
This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables.<br>
It can be called from both the repo root and the docker directory.

So, if you want to build a Multi Arch Alpine container pushing to your localhost registry, you can run this from within the docker directory (just make sure you executed the initialization steps above first):
```bash
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
./bake.sh alpine-multi
```

Or if you just want to build a Debian container from the repo root, you can run this:
```bash
docker/bake.sh
```

You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
This will also append those values to the tag, so you can see the built container when running `docker images`.

You can also append extra arguments after the target if you want. This can be useful, for example, to print what bake will use:
```bash
docker/bake.sh alpine-all --print
```
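For orientation only, the script is essentially a thin wrapper around the bake file; a minimal sketch of the idea (not the script's actual contents) could look like this:

```bash
#!/usr/bin/env bash
# Sketch of a bake wrapper: derive source metadata, then defer to buildx bake.
cd "$(dirname "$0")"
SOURCE_COMMIT="$(git rev-parse HEAD)"
# Exact tag if we are on one, otherwise last tag plus the short commit hash
SOURCE_VERSION="$(git describe --tags --exact-match 2>/dev/null || echo "$(git describe --tags --abbrev=0)-${SOURCE_COMMIT:0:8}")"
export SOURCE_COMMIT SOURCE_VERSION
exec docker buildx bake --file docker-bake.hcl "$@"
```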

### Testing baked images

To test these images, run them using the correct tag and provide the platform.<br>
For example, after you have built an arm64 image via `./bake.sh debian-arm64` you can run:
```bash
docker run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  vaultwarden/server:testing-arm64
```
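With that container running, a quick smoke test from the host looks like this, assuming the port mapping above and the `/alive` endpoint used by `healthcheck.sh`:

```bash
# The web vault should answer on the mapped port
curl -sI http://localhost:8080/ | head -n 1
# The health endpoint used by the container healthcheck
curl -s http://localhost:8080/alive
```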

## Using the `podman-bake.sh` script

To also make building easier using podman, there is a `podman-bake.sh` script.<br>
This script calls `podman buildx build` with the needed parameters and, like `bake.sh`, it generates some variables automatically.<br>
It can be called from both the repo root and the docker directory.

**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES` and a single `BASE_TAGS` value, no comma separated values. It also only supports building separate architectures, no Multi Arch containers.

To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
```bash
DB="sqlite,enable_mimalloc" \
./podman-bake.sh alpine-arm64
```

Or if you just want to build a Debian container from the repo root, you can run this:
```bash
docker/podman-bake.sh
```

You can append extra arguments after the target if you want. This can be useful, for example, to disable the cache like this:
```bash
./podman-bake.sh alpine-arm64 --no-cache
```

For the podman builds you can, just like the `bake.sh` script, also append the architecture to build for that specific platform.<br>

### Testing podman built images

The command to start a podman-built container is almost the same as for the docker/bake-built containers. The images start with `localhost/`, so you need to prepend that.

```bash
podman run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  localhost/vaultwarden/server:testing-arm64
```
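Since `podman-bake.sh` accepts the same variables (one value each), you can also pick the tag and feature set explicitly; a sketch, assuming a `debian-amd64` target analogous to the `alpine-arm64` one used above:

```bash
# Build a Debian amd64 image tagged 1.29.2 instead of the default "testing"
BASE_TAGS="1.29.2" \
DB="sqlite,mysql,postgresql" \
./podman-bake.sh debian-amd64
```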

## Variables supported

| Variable              | default            | description |
| --------------------- | ------------------ | ----------- |
| CARGO_PROFILE         | null               | Which cargo profile to use. `null` means what is defined in the Dockerfile |
| DB                    | null               | Which `features` to build. `null` means what is defined in the Dockerfile |
| SOURCE_REPOSITORY_URL | null               | The source repository from where this build is triggered |
| SOURCE_COMMIT         | null               | The commit hash of the current commit for this build |
| SOURCE_VERSION        | null               | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
| BASE_TAGS             | testing            | Tags to be used. Can be a comma separated value like "latest,1.29.2" |
| CONTAINER_REGISTRIES  | vaultwarden/server | Comma separated value of container registries. Like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
| VW_VERSION            | null               | To override the `SOURCE_VERSION` value. This is also used by the `build.rs` code, for example |
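These variables can be combined in a single invocation. For example, a release-style bake pushing to two registries (a sketch; substitute the registries, tags and targets you actually use):

```bash
# Explicit tags, two registries, default release profile
BASE_TAGS="latest,1.29.2" \
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
docker/bake.sh debian-multi
```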
@@ -1,118 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libmariadb-dev \
|
|
||||||
libpq-dev
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/debian:bullseye-slim
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
@@ -1,112 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
@@ -1,118 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libmariadb-dev \
|
|
||||||
libpq-dev
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/debian:bullseye-slim
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
@@ -1,112 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
@@ -1,139 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies for the arm64 architecture
|
|
||||||
RUN dpkg --add-architecture arm64 \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-aarch64-linux-gnu \
|
|
||||||
libc6-dev:arm64 \
|
|
||||||
libmariadb-dev:arm64 \
|
|
||||||
libmariadb-dev-compat:arm64 \
|
|
||||||
libmariadb3:arm64 \
|
|
||||||
libpq-dev:arm64 \
|
|
||||||
libpq5:arm64 \
|
|
||||||
libssl-dev:arm64 \
|
|
||||||
#
|
|
||||||
# Make sure cargo has the right target config
|
|
||||||
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
|
|
||||||
|
|
||||||
# Set arm specific environment values
|
|
||||||
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
|
|
||||||
CROSS_COMPILE="1" \
|
|
||||||
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
|
|
||||||
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add aarch64-unknown-linux-gnu
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/aarch64-debian:bullseye
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    libc6-dev:arm64 \
    libmariadb-dev:arm64 \
    libmariadb-dev-compat:arm64 \
    libmariadb3:arm64 \
    libpq-dev:arm64 \
    libpq5:arm64 \
    libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,143 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    libc6-dev:armel \
    libmariadb-dev:armel \
    libmariadb-dev-compat:armel \
    libmariadb3:armel \
    libpq-dev:armel \
    libpq5:armel \
    libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,143 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    libc6-dev:armel \
    libmariadb-dev:armel \
    libmariadb-dev-compat:armel \
    libmariadb3:armel \
    libpq-dev:armel \
    libpq5:armel \
    libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabihf \
    libc6-dev:armhf \
    libmariadb-dev:armhf \
    libmariadb-dev-compat:armhf \
    libmariadb3:armhf \
    libpq-dev:armhf \
    libpq5:armhf \
    libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabihf \
    libc6-dev:armhf \
    libmariadb-dev:armhf \
    libmariadb-dev-compat:armhf \
    libmariadb3:armhf \
    libpq-dev:armhf \
    libpq5:armhf \
    libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#   $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#   $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#   [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#   $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#   [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
15  docker/bake.sh  (Executable file)
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Be verbose on what is being executed
set -x

# Make sure we set the context to `..` so it will go up one directory
docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
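As a rough illustration of how this wrapper is meant to be invoked (a sketch, not documented project usage): the target names come from docker-bake.hcl further below, and any extra arguments are passed straight through to `docker buildx bake`; the image names that result depend on the BASE_TAGS and CONTAINER_REGISTRIES variables.

    $ ./docker/bake.sh debian                 # build the Debian image for the host architecture
    $ ./docker/bake.sh alpine-arm64           # build only the Alpine arm64 variant
    $ ./docker/bake.sh debian-multi --print   # show the resolved multi-arch build definition without building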
33  docker/bake_env.sh  (Normal file)
@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# If SOURCE_COMMIT is provided via env skip this
if [ -z "${SOURCE_COMMIT+x}" ]; then
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# If VW_VERSION is provided via env use it as SOURCE_VERSION
# Else define it using git
if [[ -n "${VW_VERSION}" ]]; then
    SOURCE_VERSION="${VW_VERSION}"
else
    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
    if [[ -n "${GIT_EXACT_TAG}" ]]; then
        SOURCE_VERSION="${GIT_EXACT_TAG}"
    else
        GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
        SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
        GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
        case "${GIT_BRANCH}" in
            main|master|HEAD)
                # Do not add the branch name for these branches
                ;;
            *)
                SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
                ;;
        esac
    fi
fi

# Export the rendered variables above so bake will use them
export SOURCE_COMMIT
export SOURCE_VERSION
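For context, a hedged example of the version strings this script produces, using illustrative tag and commit values only: on a checkout sitting exactly on tag 1.30.0, SOURCE_VERSION becomes "1.30.0"; on a later commit abcdef12 on a branch named fix-icons it becomes "1.30.0-abcdef12 (fix-icons)"; on main, master, or a detached HEAD the branch suffix is omitted, giving "1.30.0-abcdef12".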
235  docker/docker-bake.hcl  (Normal file)
@@ -0,0 +1,235 @@
// ==== Baking Variables ====

// Set which cargo profile to use, dev or release for example
// Use the value provided in the Dockerfile as default
variable "CARGO_PROFILE" {
  default = null
}

// Set which DB's (features) to enable
// Use the value provided in the Dockerfile as default
variable "DB" {
  default = null
}

// The repository this build was triggered from
variable "SOURCE_REPOSITORY_URL" {
  default = null
}

// The commit hash of the current commit this build was triggered on
variable "SOURCE_COMMIT" {
  default = null
}

// The version of this build
// Typically the current exact tag of this commit,
// else the last tag and the first 8 characters of the source commit
variable "SOURCE_VERSION" {
  default = null
}

// This can be used to overwrite SOURCE_VERSION
// It will be used during the build.rs building stage
variable "VW_VERSION" {
  default = null
}

// The base tag(s) to use
// This can be a comma separated value like "testing,1.29.2"
variable "BASE_TAGS" {
  default = "testing"
}

// Which container registries should be used for the tagging
// This can be a comma separated value
// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
variable "CONTAINER_REGISTRIES" {
  default = "vaultwarden/server"
}


// ==== Baking Groups ====

group "default" {
  targets = ["debian"]
}


// ==== Shared Baking ====
function "labels" {
  params = []
  result = {
    "org.opencontainers.image.description" = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
    "org.opencontainers.image.licenses" = "AGPL-3.0-only"
    "org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
    "org.opencontainers.image.url" = "https://github.com/dani-garcia/vaultwarden"
    "org.opencontainers.image.created" = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
    "org.opencontainers.image.source" = "${SOURCE_REPOSITORY_URL}"
    "org.opencontainers.image.revision" = "${SOURCE_COMMIT}"
    "org.opencontainers.image.version" = "${SOURCE_VERSION}"
  }
}

target "_default_attributes" {
  labels = labels()
  args = {
    DB = "${DB}"
    CARGO_PROFILE = "${CARGO_PROFILE}"
    VW_VERSION = "${VW_VERSION}"
  }
}


// ==== Debian Baking ====

// Default Debian target, will build a container using the hosts platform architecture
target "debian" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.debian"
  tags = generate_tags("", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "debian-multi" {
  inherits = ["debian"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "debian-amd64" {
  inherits = ["debian"]
  platforms = ["linux/amd64"]
  tags = generate_tags("", "-amd64")
}

target "debian-arm64" {
  inherits = ["debian"]
  platforms = ["linux/arm64"]
  tags = generate_tags("", "-arm64")
}

target "debian-armv7" {
  inherits = ["debian"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("", "-armv7")
}

target "debian-armv6" {
  inherits = ["debian"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("", "-armv6")
}

// A Group to build all platforms individually for local testing
group "debian-all" {
  targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
}


// ==== Alpine Baking ====

// Default Alpine target, will build a container using the hosts platform architecture
target "alpine" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.alpine"
  tags = generate_tags("-alpine", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "alpine-multi" {
  inherits = ["alpine"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("-alpine", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "alpine-amd64" {
  inherits = ["alpine"]
  platforms = ["linux/amd64"]
  tags = generate_tags("-alpine", "-amd64")
}

target "alpine-arm64" {
  inherits = ["alpine"]
  platforms = ["linux/arm64"]
  tags = generate_tags("-alpine", "-arm64")
}

target "alpine-armv7" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("-alpine", "-armv7")
}

target "alpine-armv6" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("-alpine", "-armv6")
}

// A Group to build all platforms individually for local testing
group "alpine-all" {
  targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
}


// ==== Bake everything locally ====

group "all" {
  targets = ["debian-all", "alpine-all"]
}


// ==== Baking functions ====

// This will return the local platform as amd64, arm64 or armv7 for example
// It can be used for creating a local image tag
function "platform_tag" {
  params = []
  result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}"
}


function "get_container_registries" {
  params = []
  result = flatten(split(",", CONTAINER_REGISTRIES))
}

function "get_base_tags" {
  params = []
  result = flatten(split(",", BASE_TAGS))
}

function "generate_tags" {
  params = [
    suffix,   // What to append to the BASE_TAG when needed, like `-alpine` for example
    platform  // the platform we are building for if needed
  ]
  result = flatten([
    for registry in get_container_registries() :
      [for base_tag in get_base_tags() :
        concat(
          # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
          base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
          # The default tagging strategy
          ["${registry}:${base_tag}${suffix}${platform}"]
        )
      ]
  ])
}

function "image_index_annotations" {
  params = []
  result = flatten([
    for key, value in labels() :
    value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : []
  ])
}
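To make the tagging logic concrete, a worked example under the default values above (CONTAINER_REGISTRIES="vaultwarden/server", BASE_TAGS="testing"): generate_tags("-alpine", "-amd64") yields ["vaultwarden/server:testing-alpine-amd64"]. With BASE_TAGS="latest,1.30.0" (the version tag here is purely illustrative) the same call would yield ["vaultwarden/server:alpine-amd64", "vaultwarden/server:latest-alpine-amd64", "vaultwarden/server:1.30.0-alpine-amd64"], because the `latest` base tag also triggers the extra `:alpine` tag.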
@@ -7,10 +7,20 @@
 CONFIG_FILE="${DATA_FOLDER}"/config.json
 
+# Check if there is a .env file configured
+# If that is the case, load it into the environment before running any check
+if [ -z "${ENV_FILE}" ]; then
+    ENV_FILE=".env"
+fi
+if [ -r "${ENV_FILE}" ]; then
+    # shellcheck disable=SC1090
+    . "${ENV_FILE}"
+fi
+
 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {
-    local key="$1"
+    key="$1"
     # Extract a line of the form:
     #     "domain": "https://bw.example.com/path",
     grep "\"${key}\":" "${CONFIG_FILE}" |
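A small usage sketch for the new lookup order (the path below is hypothetical): running `ENV_FILE=/etc/vaultwarden/.env /healthcheck.sh` makes the healthcheck source that env file before it inspects config.json and the process environment, so the probe uses the same settings as a container configured purely through an env file; with ENV_FILE unset it falls back to a `.env` file in the working directory if one is readable.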
105  docker/podman-bake.sh  (Executable file)
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Check if a target is given as first argument
# If not we assume the defaults and pass the given arguments to the podman command
case "${1}" in
    alpine*|debian*)
        TARGET="${1}"
        # Now shift the $@ array so we only have the rest of the arguments
        # This allows us to append these as extra arguments to the podman buildx build command
        shift
        ;;
esac

LABEL_ARGS=(
    --label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
    --label org.opencontainers.image.licenses="AGPL-3.0-only"
    --label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    --label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    --label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
)
if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
fi
if [[ -n "${SOURCE_COMMIT}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
fi
if [[ -n "${SOURCE_VERSION}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
fi

# Check if and which --build-arg arguments we need to configure
BUILD_ARGS=()
if [[ -n "${DB}" ]]; then
    BUILD_ARGS+=(--build-arg DB="${DB}")
fi
if [[ -n "${CARGO_PROFILE}" ]]; then
    BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
fi
if [[ -n "${VW_VERSION}" ]]; then
    BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
fi

# Set the default BASE_TAGS if none are provided
if [[ -z "${BASE_TAGS}" ]]; then
    BASE_TAGS="testing"
fi

# Set the default CONTAINER_REGISTRIES if none are provided
if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
    CONTAINER_REGISTRIES="vaultwarden/server"
fi

# Check which Dockerfile we need to use, default is debian
case "${TARGET}" in
    alpine*)
        BASE_TAGS="${BASE_TAGS}-alpine"
        DOCKERFILE="Dockerfile.alpine"
        ;;
    *)
        DOCKERFILE="Dockerfile.debian"
        ;;
esac

# Check which platform we need to build and append the BASE_TAGS with the architecture
case "${TARGET}" in
    *-arm64)
        BASE_TAGS="${BASE_TAGS}-arm64"
        PLATFORM="linux/arm64"
        ;;
    *-armv7)
        BASE_TAGS="${BASE_TAGS}-armv7"
        PLATFORM="linux/arm/v7"
        ;;
    *-armv6)
        BASE_TAGS="${BASE_TAGS}-armv6"
        PLATFORM="linux/arm/v6"
        ;;
    *)
        BASE_TAGS="${BASE_TAGS}-amd64"
        PLATFORM="linux/amd64"
        ;;
esac

# Be verbose on what is being executed
set -x

# Build the image with podman
# We use the docker format here since we are using `SHELL`, which is not supported by OCI
# shellcheck disable=SC2086
podman buildx build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
    --format=docker \
    "${LABEL_ARGS[@]}" \
    "${BUILD_ARGS[@]}" \
    --file="${BASEDIR}/${DOCKERFILE}" "$@" \
    "${BASEDIR}/.."
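By analogy with bake.sh, a hedged example of driving the podman variant (the registry name is an example only): `CONTAINER_REGISTRIES="registry.example.com/vaultwarden" ./docker/podman-bake.sh alpine-armv7` builds the Alpine image for linux/arm/v7 and tags it registry.example.com/vaultwarden:testing-alpine-armv7, while running the script with no target builds the Debian image for linux/amd64 tagged vaultwarden/server:testing-amd64.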
@@ -1,17 +1,31 @@
 #!/usr/bin/env python3
 
-import os, argparse, json
+import os
+import argparse
+import json
+
+import yaml
 import jinja2
 
+# Load settings file
+with open("DockerSettings.yaml", 'r') as yaml_file:
+    yaml_data = yaml.safe_load(yaml_file)
+
+settings_env = jinja2.Environment(
+    loader=jinja2.FileSystemLoader(os.getcwd()),
+)
+settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data))
+
 args_parser = argparse.ArgumentParser()
 args_parser.add_argument('template_file', help='Jinja2 template file to render.')
 args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
 cli_args = args_parser.parse_args()
 
+# Merge the default config yaml with the json arguments given.
 render_vars = json.loads(cli_args.render_vars)
+settings_yaml.update(render_vars)
+
 environment = jinja2.Environment(
     loader=jinja2.FileSystemLoader(os.getcwd()),
     trim_blocks=True,
 )
-print(environment.get_template(cli_args.template_file).render(render_vars))
+print(environment.get_template(cli_args.template_file).render(settings_yaml))
@@ -1,20 +0,0 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix
51  hooks/build
@@ -1,51 +0,0 @@
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildkit_suffix=.buildkit
fi

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
111  hooks/push
@@ -1,111 +0,0 @@
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }

set -ex

echo ">>> Starting local Docker registry when needed..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
# First check if there already is a registry container running, else skip it.
# This will only happen either locally or running it via Github Actions
#
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
    # defaults to port 5000
    docker run -d --name registry --network host registry:2
fi

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
# Check if there already is a builder running, else skip this and use the existing.
# This will only happen either locally or running it via Github Actions
#
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
    docker buildx create --name builder --use --driver-opt network=host
fi

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platform="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                 CHAR(36) NOT NULL,
    organization_uuid         CHAR(36),
    request_device_identifier CHAR(36) NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        CHAR(36),
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
MODIFY master_password_hash TEXT;

ALTER TABLE auth_requests
MODIFY enc_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
2  migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql  (Normal file)
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN `key` TEXT;
@@ -0,0 +1 @@
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                 CHAR(36) NOT NULL,
    organization_uuid         CHAR(36),
    request_device_identifier CHAR(36) NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        CHAR(36),
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             TIMESTAMP NOT NULL,
    response_date             TIMESTAMP,
    authentication_date       TIMESTAMP,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
ALTER COLUMN master_password_hash DROP NOT NULL;

ALTER TABLE auth_requests
ALTER COLUMN enc_key DROP NOT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ALTER COLUMN file_size TYPE BIGINT,
ALTER COLUMN file_size SET NOT NULL;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      TEXT NOT NULL PRIMARY KEY,
    user_uuid                 TEXT NOT NULL,
    organization_uuid         TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        TEXT,
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,29 @@
-- Create new auth_requests table with master_password_hash as nullable column
CREATE TABLE auth_requests_new (
    uuid                      TEXT NOT NULL PRIMARY KEY,
    user_uuid                 TEXT NOT NULL,
    organization_uuid         TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        TEXT,
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT,
    master_password_hash      TEXT,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY (user_uuid) REFERENCES users (uuid),
    FOREIGN KEY (organization_uuid) REFERENCES organizations (uuid)
);

-- Transfer current data to new table
INSERT INTO auth_requests_new SELECT * FROM auth_requests;

-- Drop the old table
DROP TABLE auth_requests;

-- Rename the new table to the original name
ALTER TABLE auth_requests_new RENAME TO auth_requests;
@@ -0,0 +1,2 @@
-- Add the external_id to the users_organizations table
ALTER TABLE "users_organizations" ADD COLUMN "external_id" TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1 @@
-- Integer size in SQLite is already i64, so we don't need to do anything
@@ -1 +0,0 @@
1.70.0
4  rust-toolchain.toml  (Normal file)
@@ -0,0 +1,4 @@
[toolchain]
channel = "1.75.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
@@ -13,7 +13,10 @@ use rocket::{
 };
 
 use crate::{
-    api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
+    api::{
+        core::{log_event, two_factor},
+        unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
+    },
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
@@ -21,6 +24,7 @@ use crate::{
     mail,
     util::{
         docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
+        NumberOrString,
     },
     CONFIG, VERSION,
 };
@@ -184,12 +188,11 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
     let claims = generate_admin_claims();
     let jwt = encode_jwt(&claims);
 
-    let cookie = Cookie::build(COOKIE_NAME, jwt)
+    let cookie = Cookie::build((COOKIE_NAME, jwt))
         .path(admin_path())
         .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
         .same_site(SameSite::Strict)
-        .http_only(true)
-        .finish();
+        .http_only(true);
 
     cookies.add(cookie);
     if let Some(redirect) = redirect {
@@ -279,12 +282,11 @@ async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
 #[post("/invite", data = "<data>")]
 async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
-    let email = data.email.clone();
     if User::find_by_mail(&data.email, &mut conn).await.is_some() {
         err_code!("User already exists", Status::Conflict.code)
     }
 
-    let mut user = User::new(email);
+    let mut user = User::new(data.email);
 
     async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
         if CONFIG.mail_enabled() {
@@ -314,7 +316,7 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
 
 #[get("/logout")]
 fn logout(cookies: &CookieJar<'_>) -> Redirect {
-    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+    cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
     Redirect::to(admin_path())
 }
 
@@ -326,6 +328,10 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
         let mut usr = u.to_json(&mut conn).await;
         usr["UserEnabled"] = json!(u.enabled);
         usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["LastActive"] = match u.last_active(&mut conn).await {
+            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
+            None => json!(None::<String>),
+        };
         users_json.push(usr);
     }
 
@@ -340,7 +346,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
         let mut usr = u.to_json(&mut conn).await;
         usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
         usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
-        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
+        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
         usr["user_enabled"] = json!(u.enabled);
         usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         usr["last_active"] = match u.last_active(&mut conn).await {
@@ -388,7 +394,7 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
             EventType::OrganizationUserRemoved as i32,
             &user_org.uuid,
             &user_org.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,
@@ -407,7 +413,7 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
 
     if CONFIG.push_enabled() {
         for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
-            match unregister_push_device(device.uuid).await {
+            match unregister_push_device(device.push_uuid).await {
                 Ok(r) => r,
                 Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
             };
@@ -443,9 +449,10 @@ async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyR
 }
 
 #[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     let mut user = get_user_or_404(uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }
@@ -515,7 +522,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
         EventType::OrganizationUserUpdated as i32,
         &user_to_edit.uuid,
         &data.org_uuid,
-        String::from(ACTING_ADMIN_USER),
+        ACTING_ADMIN_USER,
         14, // Use UnknownBrowser type
         &token.ip.ip,
         &mut conn,
@@ -543,7 +550,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
         org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
-        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
+        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
         organizations_json.push(org);
     }
 
@@ -783,16 +790,16 @@ impl<'r> FromRequest<'r> for AdminToken {
                 if requested_page.is_empty() {
                     return Outcome::Forward(Status::Unauthorized);
                 } else {
-                    return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
+                    return Outcome::Error((Status::Unauthorized, "Unauthorized"));
                 }
             }
         };
 
         if decode_admin(access_token).is_err() {
             // Remove admin cookie
-            cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+            cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
             error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
-            return Outcome::Failure((Status::Unauthorized, "Session expired"));
+            return Outcome::Error((Status::Unauthorized, "Session expired"));
         }
 
         Outcome::Success(Self {
@@ -1,16 +1,19 @@
|
|||||||
|
use crate::db::DbPool;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::log_user_event, register_push_device, unregister_push_device, EmptyResult, JsonResult, JsonUpcase,
|
core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
|
||||||
Notify, NumberOrString, PasswordData, UpdateType,
|
JsonUpcase, Notify, PasswordOrOtpData, UpdateType,
|
||||||
},
|
},
|
||||||
auth::{decode_delete, decode_invite, decode_verify_email, Headers},
|
auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
|
||||||
crypto,
|
crypto,
|
||||||
db::{models::*, DbConn},
|
db::{models::*, DbConn},
|
||||||
mail, CONFIG,
|
mail,
|
||||||
|
util::NumberOrString,
|
||||||
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
use rocket::{
|
use rocket::{
|
||||||
@@ -51,6 +54,11 @@ pub fn routes() -> Vec<rocket::Route> {
|
|||||||
put_device_token,
|
put_device_token,
|
||||||
put_clear_device_token,
|
put_clear_device_token,
|
||||||
post_clear_device_token,
|
post_clear_device_token,
|
||||||
|
post_auth_request,
|
||||||
|
get_auth_request,
|
||||||
|
put_auth_request,
|
||||||
|
get_auth_request_response,
|
||||||
|
get_auth_requests,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -273,8 +281,9 @@ async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: Db
|
|||||||
#[get("/users/<uuid>/public-key")]
|
#[get("/users/<uuid>/public-key")]
|
||||||
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let user = match User::find_by_uuid(uuid, &mut conn).await {
|
let user = match User::find_by_uuid(uuid, &mut conn).await {
|
||||||
Some(user) => user,
|
Some(user) if user.public_key.is_some() => user,
|
||||||
None => err!("User doesn't exist"),
|
Some(_) => err_code!("User has no public_key", Status::NotFound.code),
|
||||||
|
None => err_code!("User doesn't exist", Status::NotFound.code),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
@@ -340,7 +349,7 @@ async fn post_password(
|
|||||||
|
|
||||||
let save_result = user.save(&mut conn).await;
|
let save_result = user.save(&mut conn).await;
|
||||||
|
|
||||||
// Prevent loging out the client where the user requested this endpoint from.
|
// Prevent logging out the client where the user requested this endpoint from.
|
||||||
// If you do logout the user it will causes issues at the client side.
|
// If you do logout the user it will causes issues at the client side.
|
||||||
// Adding the device uuid will prevent this.
|
// Adding the device uuid will prevent this.
|
||||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||||
@@ -487,7 +496,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
|
|||||||
|
|
||||||
let save_result = user.save(&mut conn).await;
|
let save_result = user.save(&mut conn).await;
|
||||||
|
|
||||||
// Prevent loging out the client where the user requested this endpoint from.
|
// Prevent logging out the client where the user requested this endpoint from.
|
||||||
// If you do logout the user it will causes issues at the client side.
|
// If you do logout the user it will causes issues at the client side.
|
||||||
// Adding the device uuid will prevent this.
|
// Adding the device uuid will prevent this.
|
||||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||||
@@ -497,17 +506,15 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
|
|||||||
|
|
||||||
#[post("/accounts/security-stamp", data = "<data>")]
|
#[post("/accounts/security-stamp", data = "<data>")]
|
||||||
async fn post_sstamp(
|
async fn post_sstamp(
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||||
user.reset_security_stamp();
|
user.reset_security_stamp();
|
||||||
@@ -527,6 +534,10 @@ struct EmailTokenData {

 #[post("/accounts/email-token", data = "<data>")]
 async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: EmailTokenData = data.into_inner().data;
     let mut user = headers.user;

@@ -573,6 +584,10 @@ async fn post_email(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: ChangeEmailData = data.into_inner().data;
     let mut user = headers.user;

@@ -722,18 +737,16 @@ async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut
 }

 #[post("/accounts/delete", data = "<data>")]
-async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> EmptyResult {
     delete_account(data, headers, conn).await
 }

 #[delete("/accounts", data = "<data>")]
-async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
+async fn delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;

     user.delete(&mut conn).await
 }
@@ -840,20 +853,13 @@ fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers
     Ok(())
 }

-async fn _api_key(
-    data: JsonUpcase<SecretVerificationRequest>,
-    rotate: bool,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
+async fn _api_key(data: JsonUpcase<PasswordOrOtpData>, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult {
     use crate::util::format_date;

-    let data: SecretVerificationRequest = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;

     if rotate || user.api_key.is_none() {
         user.api_key = Some(crypto::generate_api_key());
@@ -868,12 +874,12 @@ async fn _api_key(
 }

 #[post("/accounts/api-key", data = "<data>")]
-async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
     _api_key(data, false, headers, conn).await
 }

 #[post("/accounts/rotate-api-key", data = "<data>")]
-async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn rotate_api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
     _api_key(data, true, headers, conn).await
 }

@@ -907,26 +913,23 @@ impl<'r> FromRequest<'r> for KnownDevice {
             let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
                 Ok(bytes) => bytes,
                 Err(_) => {
-                    return Outcome::Failure((
-                        Status::BadRequest,
-                        "X-Request-Email value failed to decode as base64url",
-                    ));
+                    return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
                 }
             };
             match String::from_utf8(email_bytes) {
                 Ok(email) => email,
                 Err(_) => {
-                    return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
+                    return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
                 }
             }
         } else {
-            return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
+            return Outcome::Error((Status::BadRequest, "X-Request-Email value is required"));
         };

         let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
             uuid.to_string()
         } else {
-            return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
+            return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
         };

         Outcome::Success(KnownDevice {
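The Outcome::Failure to Outcome::Error renames above track the Rocket 0.5 request-guard API, where the failure variant was renamed. A minimal, self-contained guard using the renamed variant might look like the sketch below; it is illustrative only and not code from this repository:

// Request guard requiring an X-Device-Identifier header (rocket 0.5 style).
use rocket::http::Status;
use rocket::request::{FromRequest, Outcome, Request};

struct DeviceId(String);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for DeviceId {
    type Error = &'static str;

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match req.headers().get_one("X-Device-Identifier") {
            Some(id) => Outcome::Success(DeviceId(id.to_string())),
            // Outcome::Error replaces the pre-0.5 Outcome::Failure variant.
            None => Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required")),
        }
    }
}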
@@ -949,27 +952,34 @@ async fn post_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Hea

 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
 async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    if !CONFIG.push_enabled() {
-        return Ok(());
-    }
-
     let data = data.into_inner().data;
     let token = data.PushToken;

     let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
         Some(device) => device,
         None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
     };

+    // if the device already has been registered
+    if device.is_registered() {
+        // check if the new token is the same as the registered token
+        if device.push_token.is_some() && device.push_token.unwrap() == token.clone() {
+            debug!("Device {} is already registered and token is the same", uuid);
+            return Ok(());
+        } else {
+            // Try to unregister already registered device
+            let _ = unregister_push_device(device.push_uuid).await;
+        }
+        // clear the push_uuid
+        device.push_uuid = None;
+    }
     device.push_token = Some(token);
-    if device.push_uuid.is_none() {
-        device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
-    }
     if let Err(e) = device.save(&mut conn).await {
-        err!(format!("An error occured while trying to save the device push token: {e}"));
-    }
-    if let Err(e) = register_push_device(headers.user.uuid, device).await {
-        err!(format!("An error occured while proceeding registration of a device: {e}"));
+        err!(format!("An error occurred while trying to save the device push token: {e}"));
     }

+    register_push_device(&mut device, &mut conn).await?;
+
     Ok(())
 }

@@ -985,7 +995,7 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {

     if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
         Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
-        unregister_push_device(device.uuid).await?;
+        unregister_push_device(device.push_uuid).await?;
     }

     Ok(())
@@ -996,3 +1006,211 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
 async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
     put_clear_device_token(uuid, conn).await
 }
+
+#[derive(Debug, Deserialize)]
+#[allow(non_snake_case)]
+struct AuthRequestRequest {
+    accessCode: String,
+    deviceIdentifier: String,
+    email: String,
+    publicKey: String,
+    #[serde(alias = "type")]
+    _type: i32,
+}
+
+#[post("/auth-requests", data = "<data>")]
+async fn post_auth_request(
+    data: Json<AuthRequestRequest>,
+    headers: ClientHeaders,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    let data = data.into_inner();
+
+    let user = match User::find_by_mail(&data.email, &mut conn).await {
+        Some(user) => user,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    let mut auth_request = AuthRequest::new(
+        user.uuid.clone(),
+        data.deviceIdentifier.clone(),
+        headers.device_type,
+        headers.ip.ip.to_string(),
+        data.accessCode,
+        data.publicKey,
+    );
+    auth_request.save(&mut conn).await?;
+
+    nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.deviceIdentifier, &mut conn).await;
+
+    Ok(Json(json!({
+        "id": auth_request.uuid,
+        "publicKey": auth_request.public_key,
+        "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+        "requestIpAddress": auth_request.request_ip,
+        "key": null,
+        "masterPasswordHash": null,
+        "creationDate": auth_request.creation_date.and_utc(),
+        "responseDate": null,
+        "requestApproved": false,
+        "origin": CONFIG.domain_origin(),
+        "object": "auth-request"
+    })))
+}
+
+#[get("/auth-requests/<uuid>")]
+async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
+    let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[derive(Debug, Deserialize)]
+#[allow(non_snake_case)]
+struct AuthResponseRequest {
+    deviceIdentifier: String,
+    key: String,
+    masterPasswordHash: Option<String>,
+    requestApproved: bool,
+}
+
+#[put("/auth-requests/<uuid>", data = "<data>")]
+async fn put_auth_request(
+    uuid: &str,
+    data: Json<AuthResponseRequest>,
+    mut conn: DbConn,
+    ant: AnonymousNotify<'_>,
+    nt: Notify<'_>,
+) -> JsonResult {
+    let data = data.into_inner();
+    let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    auth_request.approved = Some(data.requestApproved);
+    auth_request.enc_key = Some(data.key);
+    auth_request.master_password_hash = data.masterPasswordHash;
+    auth_request.response_device_id = Some(data.deviceIdentifier.clone());
+    auth_request.save(&mut conn).await?;
+
+    if auth_request.approved.unwrap_or(false) {
+        ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
+        nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.deviceIdentifier, &mut conn).await;
+    }
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[get("/auth-requests/<uuid>/response?<code>")]
+async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) -> JsonResult {
+    let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    if !auth_request.check_access_code(code) {
+        err!("Access code invalid doesn't exist")
+    }
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[get("/auth-requests")]
+async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
+    let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &mut conn).await;
+
+    Ok(Json(json!({
+        "data": auth_requests
+            .iter()
+            .filter(|request| request.approved.is_none())
+            .map(|request| {
+                let response_date_utc = request.response_date.map(|response_date| response_date.and_utc());
+
+                json!({
+                    "id": request.uuid,
+                    "publicKey": request.public_key,
+                    "requestDeviceType": DeviceType::from_i32(request.device_type).to_string(),
+                    "requestIpAddress": request.request_ip,
+                    "key": request.enc_key,
+                    "masterPasswordHash": request.master_password_hash,
+                    "creationDate": request.creation_date.and_utc(),
+                    "responseDate": response_date_utc,
+                    "requestApproved": request.approved,
+                    "origin": CONFIG.domain_origin(),
+                    "object":"auth-request"
+                })
+            }).collect::<Vec<Value>>(),
+        "continuationToken": null,
+        "object": "list"
+    })))
+}
+
+pub async fn purge_auth_requests(pool: DbPool) {
+    debug!("Purging auth requests");
+    if let Ok(mut conn) = pool.get().await {
+        AuthRequest::purge_expired_auth_requests(&mut conn).await;
+    } else {
+        error!("Failed to get DB connection while purging trashed ciphers")
+    }
+}
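The new /auth-requests endpoints implement the login-with-device flow: one device posts a request, the approving device answers it over PUT, and the requester polls the response endpoint. Below is a hypothetical client-side sketch that reuses the field names from the handlers above; the /api mount point, the reqwest dependency, and the error handling are assumptions, not part of this change:

// Illustrative only: create an auth request, then poll for the approval response.
use serde_json::json;

async fn request_login_approval(
    base: &str,
    email: &str,
    device_id: &str,
    public_key_b64: &str,
    access_code: &str,
) -> Result<String, reqwest::Error> {
    let client = reqwest::Client::new();

    // 1. Create the auth request; other enrolled devices get notified and can approve it.
    let created: serde_json::Value = client
        .post(format!("{base}/api/auth-requests"))
        .json(&json!({
            "accessCode": access_code,
            "deviceIdentifier": device_id,
            "email": email,
            "publicKey": public_key_b64,
            "type": 0,
        }))
        .send()
        .await?
        .json()
        .await?;
    let id = created["id"].as_str().unwrap_or_default().to_string();

    // 2. Poll for the response; "key" stays null until the request is approved.
    let _status: serde_json::Value = client
        .get(format!("{base}/api/auth-requests/{id}/response?code={access_code}"))
        .send()
        .await?
        .json()
        .await?;
    Ok(id)
}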
@@ -1,6 +1,7 @@
 use std::collections::{HashMap, HashSet};

 use chrono::{NaiveDateTime, Utc};
+use num_traits::ToPrimitive;
 use rocket::fs::TempFile;
 use rocket::serde::json::Json;
 use rocket::{
@@ -10,7 +11,7 @@ use rocket::{
 use serde_json::Value;

 use crate::{
-    api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
+    api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
     auth::Headers,
     crypto,
     db::{models::*, DbConn, DbPool},
@@ -206,12 +207,13 @@ pub struct CipherData {
     // TODO: Some of these might appear all the time, no need for Option
     OrganizationId: Option<String>,

+    Key: Option<String>,
+
     /*
     Login = 1,
     SecureNote = 2,
     Card = 3,
-    Identity = 4,
-    Fido2Key = 5
+    Identity = 4
     */
     pub Type: i32,
     pub Name: String,
@@ -223,7 +225,6 @@ pub struct CipherData {
     SecureNote: Option<Value>,
     Card: Option<Value>,
     Identity: Option<Value>,
-    Fido2Key: Option<Value>,

     Favorite: Option<bool>,
     Reprompt: Option<i32>,
@@ -359,14 +360,17 @@ pub async fn update_cipher_from_data(
     enforce_personal_ownership_policy(Some(&data), headers, conn).await?;

     // Check that the client isn't updating an existing cipher with stale data.
-    if let Some(dt) = data.LastKnownRevisionDate {
-        match NaiveDateTime::parse_from_str(&dt, "%+") {
-            // ISO 8601 format
-            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
-            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
-                err!("The client copy of this cipher is out of date. Resync the client and try again.")
+    // And only perform this check when not importing ciphers, else the date/time check will fail.
+    if ut != UpdateType::None {
+        if let Some(dt) = data.LastKnownRevisionDate {
+            match NaiveDateTime::parse_from_str(&dt, "%+") {
+                // ISO 8601 format
+                Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+                Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
+                    err!("The client copy of this cipher is out of date. Resync the client and try again.")
+                }
+                Ok(_) => (),
             }
-            Ok(_) => (),
         }
     }

@@ -466,7 +470,6 @@ pub async fn update_cipher_from_data(
         2 => data.SecureNote,
         3 => data.Card,
         4 => data.Identity,
-        5 => data.Fido2Key,
         _ => err!("Invalid type"),
     };

@@ -483,6 +486,7 @@ pub async fn update_cipher_from_data(
         None => err!("Data missing"),
     };

+    cipher.key = data.Key;
     cipher.name = data.Name;
     cipher.notes = data.Notes;
     cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
@@ -507,7 +511,7 @@ pub async fn update_cipher_from_data(
             event_type as i32,
             &cipher.uuid,
             org_uuid,
-            headers.user.uuid.clone(),
+            &headers.user.uuid,
             headers.device.atype,
             &headers.ip.ip,
             conn,
@@ -788,7 +792,7 @@ async fn post_collections_admin(
         EventType::CipherUpdatedCollections as i32,
         &cipher.uuid,
         &cipher.organization_uuid.unwrap(),
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -846,7 +850,6 @@ async fn put_cipher_share_selected(
     nt: Notify<'_>,
 ) -> EmptyResult {
     let mut data: ShareSelectedCipherData = data.into_inner().data;
-    let mut cipher_ids: Vec<String> = Vec::new();

     if data.Ciphers.is_empty() {
         err!("You must select at least one cipher.")
@@ -857,10 +860,9 @@ async fn put_cipher_share_selected(
     }

     for cipher in data.Ciphers.iter() {
-        match cipher.Id {
-            Some(ref id) => cipher_ids.push(id.to_string()),
-            None => err!("Request missing ids field"),
-        };
+        if cipher.Id.is_none() {
+            err!("Request missing ids field")
+        }
     }

     while let Some(cipher) = data.Ciphers.pop() {
@@ -955,7 +957,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
 struct AttachmentRequestData {
     Key: String,
     FileName: String,
-    FileSize: i32,
+    FileSize: i64,
     AdminRequest: Option<bool>, // true when attaching from an org vault view
 }

@@ -984,8 +986,11 @@ async fn post_attachment_v2(
         err!("Cipher is not write accessible")
     }

-    let attachment_id = crypto::generate_attachment_id();
     let data: AttachmentRequestData = data.into_inner().data;
+    if data.FileSize < 0 {
+        err!("Attachment size can't be negative")
+    }
+    let attachment_id = crypto::generate_attachment_id();
     let attachment =
         Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
     attachment.save(&mut conn).await.expect("Error saving attachment");
@@ -1027,6 +1032,15 @@ async fn save_attachment(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> Result<(Cipher, DbConn), crate::error::Error> {
+    let mut data = data.into_inner();
+
+    let Some(size) = data.data.len().to_i64() else {
+        err!("Attachment data size overflow");
+    };
+    if size < 0 {
+        err!("Attachment size can't be negative")
+    }
+
     let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
@@ -1039,19 +1053,29 @@ async fn save_attachment(
     // In the v2 API, the attachment record has already been created,
     // so the size limit needs to be adjusted to account for that.
     let size_adjust = match &attachment {
         None => 0, // Legacy API
-        Some(a) => i64::from(a.file_size), // v2 API
+        Some(a) => a.file_size, // v2 API
     };

     let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
         match CONFIG.user_attachment_limit() {
             Some(0) => err!("Attachments are disabled"),
             Some(limit_kb) => {
-                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &mut conn).await + size_adjust;
+                let already_used = Attachment::size_by_user(user_uuid, &mut conn).await;
+                let left = limit_kb
+                    .checked_mul(1024)
+                    .and_then(|l| l.checked_sub(already_used))
+                    .and_then(|l| l.checked_add(size_adjust));
+
+                let Some(left) = left else {
+                    err!("Attachment size overflow");
+                };
+
                 if left <= 0 {
                     err!("Attachment storage limit reached! Delete some attachments to free up space")
                 }
-                Some(left as u64)
+                Some(left)
             }
             None => None,
         }
@@ -1059,11 +1083,21 @@ async fn save_attachment(
         match CONFIG.org_attachment_limit() {
             Some(0) => err!("Attachments are disabled"),
             Some(limit_kb) => {
-                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &mut conn).await + size_adjust;
+                let already_used = Attachment::size_by_org(org_uuid, &mut conn).await;
+                let left = limit_kb
+                    .checked_mul(1024)
+                    .and_then(|l| l.checked_sub(already_used))
+                    .and_then(|l| l.checked_add(size_adjust));
+
+                let Some(left) = left else {
+                    err!("Attachment size overflow");
+                };
+
                 if left <= 0 {
                     err!("Attachment storage limit reached! Delete some attachments to free up space")
                 }
-                Some(left as u64)
+                Some(left)
             }
             None => None,
         }
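The checked_mul/checked_sub/checked_add chain above replaces plain arithmetic so that a misconfigured limit or an oversized attachment count cannot overflow and silently wrap. A small standalone sketch of the same pattern, with illustrative names only:

// Overflow-safe "remaining space" computation: any intermediate overflow yields None
// instead of a wrapped value that would pass the `left <= 0` check by accident.
fn remaining_space(limit_kb: i64, already_used: i64, size_adjust: i64) -> Option<i64> {
    limit_kb
        .checked_mul(1024)
        .and_then(|l| l.checked_sub(already_used))
        .and_then(|l| l.checked_add(size_adjust))
}

fn main() {
    // 100 KiB limit, 50 KiB already used: 51 200 bytes remain.
    assert_eq!(remaining_space(100, 50 * 1024, 0), Some(51_200));
    // An absurd configured limit no longer wraps around silently.
    assert_eq!(remaining_space(i64::MAX, 0, 0), None);
}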
@@ -1071,10 +1105,8 @@ async fn save_attachment(
         err!("Cipher is neither owned by a user nor an organization");
     };

-    let mut data = data.into_inner();
-
     if let Some(size_limit) = size_limit {
-        if data.data.len() > size_limit {
+        if size > size_limit {
             err!("Attachment storage limit exceeded with this file");
         }
     }
@@ -1084,20 +1116,19 @@ async fn save_attachment(
         None => crypto::generate_attachment_id(), // Legacy API
     };

-    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
-    let file_path = folder_path.join(&file_id);
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    let size = data.data.len() as i32;
     if let Some(attachment) = &mut attachment {
         // v2 API

         // Check the actual size against the size initially provided by
         // the client. Upstream allows +/- 1 MiB deviation from this
         // size, but it's not clear when or why this is needed.
-        const LEEWAY: i32 = 1024 * 1024; // 1 MiB
-        let min_size = attachment.file_size - LEEWAY;
-        let max_size = attachment.file_size + LEEWAY;
+        const LEEWAY: i64 = 1024 * 1024; // 1 MiB
+        let Some(max_size) = attachment.file_size.checked_add(LEEWAY) else {
+            err!("Invalid attachment size max")
+        };
+        let Some(min_size) = attachment.file_size.checked_sub(LEEWAY) else {
+            err!("Invalid attachment size min")
+        };
+
         if min_size <= size && size <= max_size {
             if size != attachment.file_size {
@@ -1112,6 +1143,10 @@ async fn save_attachment(
         }
     } else {
         // Legacy API
+
+        // SAFETY: This value is only stored in the database and is not used to access the file system.
+        // As a result, the conditions specified by Rocket [0] are met and this is safe to use.
+        // [0]: https://docs.rs/rocket/latest/rocket/fs/struct.FileName.html#-danger-
         let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string());

         if encrypted_filename.is_none() {
@@ -1121,10 +1156,14 @@ async fn save_attachment(
             err!("No attachment key provided")
         }
         let attachment =
-            Attachment::new(file_id, String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
+            Attachment::new(file_id.clone(), String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
         attachment.save(&mut conn).await.expect("Error saving attachment");
     }

+    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
+    let file_path = folder_path.join(&file_id);
+    tokio::fs::create_dir_all(&folder_path).await?;
+
     if let Err(_err) = data.data.persist_to(&file_path).await {
         data.data.move_copy_to(file_path).await?
     }
@@ -1144,7 +1183,7 @@ async fn save_attachment(
         EventType::CipherAttachmentCreated as i32,
         &cipher.uuid,
         org_uuid,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1454,19 +1493,15 @@ struct OrganizationId {
 #[post("/ciphers/purge?<organization..>", data = "<data>")]
 async fn delete_all(
     organization: Option<OrganizationId>,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
+    let data: PasswordOrOtpData = data.into_inner().data;

     let mut user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;

     match organization {
         Some(org_data) => {
@@ -1482,7 +1517,7 @@ async fn delete_all(
                 EventType::OrganizationPurgedVault as i32,
                 &org_data.org_id,
                 &org_data.org_id,
-                user.uuid,
+                &user.uuid,
                 headers.device.atype,
                 &headers.ip.ip,
                 &mut conn,
@@ -1563,16 +1598,8 @@ async fn _delete_cipher_by_uuid(
             false => EventType::CipherDeleted as i32,
         };

-        log_event(
-            event_type,
-            &cipher.uuid,
-            &org_uuid,
-            headers.user.uuid.clone(),
-            headers.device.atype,
-            &headers.ip.ip,
-            conn,
-        )
-        .await;
+        log_event(event_type, &cipher.uuid, &org_uuid, &headers.user.uuid, headers.device.atype, &headers.ip.ip, conn)
+            .await;
     }

     Ok(())
@@ -1632,7 +1659,7 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon
             EventType::CipherRestored as i32,
             &cipher.uuid.clone(),
             org_uuid,
-            headers.user.uuid.clone(),
+            &headers.user.uuid,
             headers.device.atype,
             &headers.ip.ip,
             conn,
@@ -1716,7 +1743,7 @@ async fn _delete_cipher_attachment_by_id(
             EventType::CipherAttachmentDeleted as i32,
             &cipher.uuid,
             &org_uuid,
-            headers.user.uuid.clone(),
+            &headers.user.uuid,
             headers.device.atype,
             &headers.ip.ip,
             conn,
@@ -1752,7 +1779,7 @@ impl CipherSyncData {
         let cipher_folders: HashMap<String, String>;
         let cipher_favorites: HashSet<String>;
         match sync_type {
-            // User Sync supports Folders and Favorits
+            // User Sync supports Folders and Favorites
            CipherSyncType::User => {
                 // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
                 cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect();
@@ -1760,7 +1787,7 @@ impl CipherSyncData {
                 // Generate a HashSet of all the Cipher UUID's which are marked as favorite
                 cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect();
             }
-            // Organization Sync does not support Folders and Favorits.
+            // Organization Sync does not support Folders and Favorites.
             // If these are set, it will cause issues in the web-vault.
             CipherSyncType::Organization => {
                 cipher_folders = HashMap::with_capacity(0);
@@ -1799,15 +1826,22 @@ impl CipherSyncData {
             .collect();

         // Generate a HashMap with the collections_uuid as key and the CollectionGroup record
-        let user_collections_groups: HashMap<String, CollectionGroup> = CollectionGroup::find_by_user(user_uuid, conn)
-            .await
-            .into_iter()
-            .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
-            .collect();
+        let user_collections_groups: HashMap<String, CollectionGroup> = if CONFIG.org_groups_enabled() {
+            CollectionGroup::find_by_user(user_uuid, conn)
+                .await
+                .into_iter()
+                .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
+                .collect()
+        } else {
+            HashMap::new()
+        };

-        // Get all organizations that the user has full access to via group assignement
-        let user_group_full_access_for_organizations: HashSet<String> =
-            Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect();
+        // Get all organizations that the user has full access to via group assignment
+        let user_group_full_access_for_organizations: HashSet<String> = if CONFIG.org_groups_enabled() {
+            Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect()
+        } else {
+            HashSet::new()
+        };

         Self {
             cipher_attachments,
@@ -5,11 +5,13 @@ use serde_json::Value;
 use crate::{
     api::{
         core::{CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString,
+        EmptyResult, JsonResult, JsonUpcase,
     },
     auth::{decode_emergency_access_invite, Headers},
     db::{models::*, DbConn, DbPool},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };

 pub fn routes() -> Vec<Route> {
@@ -18,6 +20,7 @@ pub fn routes() -> Vec<Route> {
         get_grantees,
         get_emergency_access,
         put_emergency_access,
+        post_emergency_access,
         delete_emergency_access,
         post_delete_emergency_access,
         send_invite,
@@ -37,42 +40,59 @@ pub fn routes() -> Vec<Route> {
 // region get

 #[get("/emergency-access/trusted")]
-async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    if !CONFIG.emergency_access_allowed() {
+        return Json(json!({
+            "Data": [{
+                "Id": "",
+                "Status": 2,
+                "Type": 0,
+                "WaitTimeDays": 0,
+                "GranteeId": "",
+                "Email": "",
+                "Name": "NOTE: Emergency Access is disabled!",
+                "Object": "emergencyAccessGranteeDetails",
+
+            }],
+            "Object": "list",
+            "ContinuationToken": null
+        }));
+    }
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
         emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
     }

-    Ok(Json(json!({
+    Json(json!({
         "Data": emergency_access_list_json,
         "Object": "list",
         "ContinuationToken": null
-    })))
+    }))
 }

 #[get("/emergency-access/granted")]
-async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
-    let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
+async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    let emergency_access_list = if CONFIG.emergency_access_allowed() {
+        EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await
+    } else {
+        Vec::new()
+    };
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
         emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
     }

-    Ok(Json(json!({
+    Json(json!({
         "Data": emergency_access_list_json,
         "Object": "list",
         "ContinuationToken": null
-    })))
+    }))
 }

 #[get("/emergency-access/<emer_id>")]
 async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
@@ -103,7 +123,7 @@ async fn post_emergency_access(
     data: JsonUpcase<EmergencyAccessUpdateData>,
     mut conn: DbConn,
 ) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let data: EmergencyAccessUpdateData = data.into_inner().data;

@@ -133,7 +153,7 @@ async fn post_emergency_access(

 #[delete("/emergency-access/<emer_id>")]
 async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let grantor_user = headers.user;

@@ -169,7 +189,7 @@ struct EmergencyAccessInviteData {

 #[post("/emergency-access/invite", data = "<data>")]
 async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let data: EmergencyAccessInviteData = data.into_inner().data;
     let email = data.Email.to_lowercase();
@@ -252,7 +272,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade

 #[post("/emergency-access/<emer_id>/reinvite")]
 async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
@@ -312,14 +332,14 @@ struct AcceptData {

 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
 async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let data: AcceptData = data.into_inner().data;
     let token = &data.Token;
     let claims = decode_emergency_access_invite(token)?;

     // This can happen if the user who received the invite used a different email to signup.
-    // Since we do not know if this is intented, we error out here and do nothing with the invite.
+    // Since we do not know if this is intended, we error out here and do nothing with the invite.
     if claims.email != headers.user.email {
         err!("Claim email does not match current users email")
     }
@@ -395,7 +415,7 @@ async fn confirm_emergency_access(
     headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let confirming_user = headers.user;
     let data: ConfirmData = data.into_inner().data;
@@ -444,7 +464,7 @@ async fn confirm_emergency_access(

 #[post("/emergency-access/<emer_id>/initiate")]
 async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let initiating_user = headers.user;
     let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
@@ -484,7 +504,7 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db

 #[post("/emergency-access/<emer_id>/approve")]
 async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
@@ -522,7 +542,7 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC

 #[post("/emergency-access/<emer_id>/reject")]
 async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
@@ -565,7 +585,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo

 #[post("/emergency-access/<emer_id>/view")]
 async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
@@ -602,7 +622,7 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn

 #[post("/emergency-access/<emer_id>/takeover")]
 async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let requesting_user = headers.user;
     let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
@@ -645,7 +665,7 @@ async fn password_emergency_access(
     headers: Headers,
     mut conn: DbConn,
 ) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

     let data: EmergencyAccessPasswordData = data.into_inner().data;
     let new_master_password_hash = &data.NewMasterPasswordHash;
@@ -722,9 +742,9 @@ fn is_valid_request(
         && emergency_access.atype == requested_access_type as i32
 }

-fn check_emergency_access_allowed() -> EmptyResult {
+fn check_emergency_access_enabled() -> EmptyResult {
     if !CONFIG.emergency_access_allowed() {
-        err!("Emergency access is not allowed.")
+        err!("Emergency access is not enabled.")
     }
     Ok(())
 }
@@ -263,7 +263,7 @@ pub async fn log_event(
     event_type: i32,
     source_uuid: &str,
     org_uuid: &str,
-    act_user_uuid: String,
+    act_user_uuid: &str,
     device_type: i32,
     ip: &IpAddr,
     conn: &mut DbConn,
@@ -271,7 +271,7 @@ pub async fn log_event(
     if !CONFIG.org_events_enabled() {
         return;
     }
-    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
+    _log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
 }

 #[allow(clippy::too_many_arguments)]
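Changing act_user_uuid from String to &str means every call site can pass &headers.user.uuid instead of cloning the string per event. A trivial sketch of the difference, with hypothetical signatures:

// Borrowing instead of owning the actor UUID: no allocation per log_event call.
fn log_event_owned(act_user_uuid: String) {
    // old style: forces headers.user.uuid.clone() at every call site
    let _ = act_user_uuid;
}

fn log_event_borrowed(act_user_uuid: &str) {
    // new style: borrows the existing String
    let _ = act_user_uuid;
}

fn main() {
    let user_uuid = String::from("3fa85f64-5717-4562-b3fc-2c963f66afa6");
    log_event_owned(user_uuid.clone()); // clone per call
    log_event_borrowed(&user_uuid);     // borrow, no allocation
}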
@@ -8,11 +8,11 @@ mod public;
 mod sends;
 pub mod two_factor;

+pub use accounts::purge_auth_requests;
 pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
-pub use two_factor::send_incomplete_2fa_notifications;

 pub fn routes() -> Vec<Route> {
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
@@ -46,15 +46,14 @@ pub fn events_routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
-use rocket::{serde::json::Json, Catcher, Route};
-use serde_json::Value;
+use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};

 use crate::{
     api::{JsonResult, JsonUpcase, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
-    util::get_reqwest_client,
+    util::{get_reqwest_client, parse_experimental_client_feature_flags},
 };

 #[derive(Serialize, Deserialize, Debug)]
@@ -192,12 +191,19 @@ fn version() -> Json<&'static str> {
 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
+    let feature_states = parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
     Json(json!({
-        "version": crate::VERSION,
+        // Note: The clients use this version to handle backwards compatibility concerns
+        // This means they expect a version that closely matches the Bitwarden server version
+        // We should make sure that we keep this updated when we support the new server features
+        // Version history:
+        // - Individual cipher key encryption: 2023.9.1
+        "version": "2023.9.1",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
-            "url": "https://github.com/dani-garcia/vaultwarden"
+            "url": "https://github.com/dani-garcia/vaultwarden",
+            "version": crate::VERSION
         },
         "environment": {
             "vault": domain,
@@ -206,6 +212,7 @@ fn config() -> Json<Value> {
             "notifications": format!("{domain}/notifications"),
             "sso": "",
         },
+        "featureStates": feature_states,
         "object": "config",
     }))
 }
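The /config hunk above wires a featureStates map built by parse_experimental_client_feature_flags into the response. As a rough sketch only (assuming the config value is a comma-separated list of flag names; this is not the project's actual parser):

    use std::collections::HashMap;

    // Hypothetical helper: turn "flag-a,flag-b" into { "flag-a": true, "flag-b": true }.
    fn parse_feature_flags(raw: &str) -> HashMap<String, bool> {
        raw.split(',')
            .map(str::trim)
            .filter(|flag| !flag.is_empty())
            .map(|flag| (flag.to_string(), true))
            .collect()
    }

    fn main() {
        let states = parse_feature_flags("autofill-v2, fido2-vault-credentials");
        assert_eq!(states.get("autofill-v2"), Some(&true));
        assert_eq!(states.len(), 2);
    }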
@@ -5,15 +5,14 @@ use serde_json::Value;

 use crate::{
     api::{
-        core::{log_event, CipherSyncData, CipherSyncType},
+        core::{log_event, two_factor, CipherSyncData, CipherSyncType},
-        ApiResult, EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData,
-        UpdateType,
+        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, PasswordOrOtpData, UpdateType,
     },
     auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
     db::{models::*, DbConn},
     error::Error,
     mail,
-    util::convert_json_key_lcase_first,
+    util::{convert_json_key_lcase_first, NumberOrString},
     CONFIG,
 };

@@ -61,6 +60,7 @@ pub fn routes() -> Vec<Route> {
     put_policy,
     get_organization_tax,
     get_plans,
+    get_plans_all,
     get_plans_tax_rates,
     import,
     post_org_keys,
@@ -186,16 +186,13 @@ async fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, mut co
 #[delete("/organizations/<org_id>", data = "<data>")]
 async fn delete_organization(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: OwnerHeaders,
     mut conn: DbConn,
 ) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
+    let data: PasswordOrOtpData = data.into_inner().data;

-    if !headers.user.check_valid_password(&password_hash) {
-        err!("Invalid password")
-    }
+    data.validate(&headers.user, true, &mut conn).await?;

     match Organization::find_by_uuid(org_id, &mut conn).await {
         None => err!("Organization not found"),
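The hunk above replaces the plain MasterPasswordHash check with data.validate(...) on a PasswordOrOtpData payload, so protected endpoints accept either the master password hash or a one-time protected-action code. A deliberately simplified, synchronous sketch of the idea (the real type is async and checks the OTP against the database; everything below is illustrative only):

    struct User {
        password_hash: String,
    }

    struct PasswordOrOtpData {
        master_password_hash: Option<String>,
        otp: Option<String>,
    }

    impl PasswordOrOtpData {
        fn validate(&self, user: &User, delete_if_valid: bool) -> Result<(), String> {
            match (&self.master_password_hash, &self.otp) {
                (Some(hash), _) if *hash == user.password_hash => Ok(()),
                (None, Some(_otp)) => {
                    // The server would verify the stored protected-action token here
                    // and, when `delete_if_valid` is set, remove it after use.
                    let _ = delete_if_valid;
                    Ok(())
                }
                _ => Err("Invalid password or OTP".into()),
            }
        }
    }

    fn main() {
        let user = User { password_hash: "hash".into() };
        let data = PasswordOrOtpData { master_password_hash: Some("hash".into()), otp: None };
        assert!(data.validate(&user, true).is_ok());
    }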
@@ -206,7 +203,7 @@ async fn delete_organization(
 #[post("/organizations/<org_id>/delete", data = "<data>")]
 async fn post_delete_organization(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: OwnerHeaders,
     conn: DbConn,
 ) -> EmptyResult {
@@ -228,7 +225,7 @@ async fn leave_organization(org_id: &str, headers: Headers, mut conn: DbConn) ->
         EventType::OrganizationUserRemoved as i32,
         &user_org.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -281,7 +278,7 @@ async fn post_organization(
         EventType::OrganizationUpdated as i32,
         org_id,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -296,7 +293,7 @@ async fn post_organization(
 async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value> {
     Json(json!({
         "Data":
-            Collection::find_by_user_uuid(headers.user.uuid.clone(), &mut conn).await
+            Collection::find_by_user_uuid(headers.user.uuid, &mut conn).await
             .iter()
             .map(Collection::to_json)
             .collect::<Value>(),
@@ -398,7 +395,7 @@ async fn post_organization_collections(
         EventType::CollectionCreated as i32,
         &collection.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -468,14 +465,18 @@ async fn post_organization_collection_update(
     }

     collection.name = data.Name;
-    collection.external_id = data.ExternalId;
+    collection.external_id = match data.ExternalId {
+        Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
+        _ => None,
+    };

     collection.save(&mut conn).await?;

     log_event(
         EventType::CollectionUpdated as i32,
         &collection.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
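The collection update above now maps an empty or whitespace-only ExternalId to None instead of storing it verbatim. The same normalization as a standalone, runnable helper:

    // Treat empty or whitespace-only external IDs as "not set".
    fn normalize_external_id(input: Option<String>) -> Option<String> {
        match input {
            Some(id) if !id.trim().is_empty() => Some(id),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(normalize_external_id(Some("   ".into())), None);
        assert_eq!(normalize_external_id(Some("dept-42".into())), Some("dept-42".to_string()));
        assert_eq!(normalize_external_id(None), None);
    }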
@@ -563,7 +564,7 @@ async fn _delete_organization_collection(
         EventType::CollectionDeleted as i32,
         &collection.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -609,7 +610,6 @@ async fn post_organization_collection_delete(
 #[allow(non_snake_case)]
 struct BulkCollectionIds {
     Ids: Vec<String>,
-    OrganizationId: String,
 }

 #[delete("/organizations/<org_id>/collections", data = "<data>")]
@@ -620,9 +620,6 @@ async fn bulk_delete_organization_collections(
     mut conn: DbConn,
 ) -> EmptyResult {
     let data: BulkCollectionIds = data.into_inner().data;
-    if org_id != data.OrganizationId {
-        err!("OrganizationId mismatch");
-    }

     let collections = data.Ids;

@@ -944,7 +941,7 @@ async fn send_invite(
         EventType::OrganizationUserInvited as i32,
         &new_user.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1238,7 +1235,7 @@ async fn _confirm_invite(
         EventType::OrganizationUserConfirmed as i32,
         &user_to_confirm.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -1400,7 +1397,7 @@ async fn edit_user(
         EventType::OrganizationUserUpdated as i32,
         &user_to_edit.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1492,7 +1489,7 @@ async fn _delete_user(
         EventType::OrganizationUserRemoved as i32,
         &user_to_delete.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -1516,9 +1513,9 @@ async fn bulk_public_keys(
     let data: OrgBulkIds = data.into_inner().data;

     let mut bulk_response = Vec::new();
-    // Check all received UserOrg UUID's and find the matching User to retreive the public-key.
+    // Check all received UserOrg UUID's and find the matching User to retrieve the public-key.
     // If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID.
-    // The web-vault will then ignore that user for the folowing steps.
+    // The web-vault will then ignore that user for the following steps.
     for user_org_id in data.Ids {
         match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await {
             Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await {
@@ -1695,38 +1692,16 @@ async fn put_policy(
         None => err!("Invalid or unsupported policy type"),
     };

-    // When enabling the TwoFactorAuthentication policy, remove this org's members that do have 2FA
+    // When enabling the TwoFactorAuthentication policy, revoke all members that do not have 2FA
     if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
-        for member in UserOrganization::find_by_org(org_id, &mut conn).await.into_iter() {
-            let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &mut conn).await.is_empty();
-
-            // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
-            // Invited users still need to accept the invite and will get an error when they try to accept the invite.
-            if user_twofactor_disabled
-                && member.atype < UserOrgType::Admin
-                && member.status != UserOrgStatus::Invited as i32
-            {
-                if CONFIG.mail_enabled() {
-                    let org = Organization::find_by_uuid(&member.org_uuid, &mut conn).await.unwrap();
-                    let user = User::find_by_uuid(&member.user_uuid, &mut conn).await.unwrap();
-
-                    mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
-                }
-
-                log_event(
-                    EventType::OrganizationUserRemoved as i32,
-                    &member.uuid,
-                    org_id,
-                    headers.user.uuid.clone(),
-                    headers.device.atype,
-                    &headers.ip.ip,
-                    &mut conn,
-                )
-                .await;
-
-                member.delete(&mut conn).await?;
-            }
-        }
+        two_factor::enforce_2fa_policy_for_org(
+            org_id,
+            &headers.user.uuid,
+            headers.device.atype,
+            &headers.ip.ip,
+            &mut conn,
+        )
+        .await?;
     }

     // When enabling the SingleOrg policy, remove this org's members that are members of other orgs
@@ -1751,7 +1726,7 @@ async fn put_policy(
         EventType::OrganizationUserRemoved as i32,
         &member.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1776,7 +1751,7 @@ async fn put_policy(
         EventType::PolicyUpdated as i32,
         &policy.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1807,12 +1782,28 @@ fn get_plans() -> Json<Value> {
             "Product": 0,
             "Name": "Free",
             "NameLocalizationKey": "planNameFree",
+            "BitwardenProduct": 0,
+            "MaxUsers": 0,
+            "DescriptionLocalizationKey": "planDescFree"
+        },{
+            "Object": "plan",
+            "Type": 0,
+            "Product": 1,
+            "Name": "Free",
+            "NameLocalizationKey": "planNameFree",
+            "BitwardenProduct": 1,
+            "MaxUsers": 0,
             "DescriptionLocalizationKey": "planDescFree"
         }],
         "ContinuationToken": null
     }))
 }

+#[get("/plans/all")]
+fn get_plans_all() -> Json<Value> {
+    get_plans()
+}
+
 #[get("/plans/sales-tax-rates")]
 fn get_plans_tax_rates(_headers: Headers) -> Json<Value> {
     // Prevent a 404 error, which also causes Javascript errors.
@@ -1862,7 +1853,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
     // This means that this endpoint can end up removing users that were added manually by an admin,
     // as opposed to upstream which only removes auto-imported users.

-    // User needs to be admin or owner to use the Directry Connector
+    // User needs to be admin or owner to use the Directory Connector
     match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
         Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
         Some(_) => err!("User has insufficient permissions to use Directory Connector"),
@@ -1877,7 +1868,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
         EventType::OrganizationUserRemoved as i32,
         &user_org.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1907,7 +1898,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
         EventType::OrganizationUserInvited as i32,
         &new_org_user.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -1943,7 +1934,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
         EventType::OrganizationUserRemoved as i32,
         &user_org.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2056,7 +2047,7 @@ async fn _revoke_organization_user(
         EventType::OrganizationUserRevoked as i32,
         &user_org.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -2175,7 +2166,7 @@ async fn _restore_organization_user(
         EventType::OrganizationUserRestored as i32,
         &user_org.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -2222,29 +2213,22 @@ struct GroupRequest {
 }

 impl GroupRequest {
-    pub fn to_group(&self, organizations_uuid: &str) -> ApiResult<Group> {
-        match self.AccessAll {
-            Some(access_all_value) => Ok(Group::new(
-                organizations_uuid.to_owned(),
-                self.Name.clone(),
-                access_all_value,
-                self.ExternalId.clone(),
-            )),
-            _ => err!("Could not convert GroupRequest to Group, because AccessAll has no value!"),
-        }
+    pub fn to_group(&self, organizations_uuid: &str) -> Group {
+        Group::new(
+            String::from(organizations_uuid),
+            self.Name.clone(),
+            self.AccessAll.unwrap_or(false),
+            self.ExternalId.clone(),
+        )
     }

-    pub fn update_group(&self, mut group: Group) -> ApiResult<Group> {
-        match self.AccessAll {
-            Some(access_all_value) => {
-                group.name = self.Name.clone();
-                group.access_all = access_all_value;
-                group.set_external_id(self.ExternalId.clone());
-
-                Ok(group)
-            }
-            _ => err!("Could not update group, because AccessAll has no value!"),
-        }
+    pub fn update_group(&self, mut group: Group) -> Group {
+        group.name = self.Name.clone();
+        group.access_all = self.AccessAll.unwrap_or(false);
+        // Group Updates do not support changing the external_id
+        // These input fields are in a disabled state, and can only be updated/added via ldap_import
+
+        group
     }
 }

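The rewritten to_group/update_group above treat a missing AccessAll value as false instead of returning an error. A small sketch of that defaulting pattern with an optional serde field (struct and field names are illustrative; assumes the serde and serde_json crates):

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct GroupPayload {
        name: String,
        access_all: Option<bool>,
    }

    fn main() {
        // The client omitted access_all, so it defaults to false.
        let payload: GroupPayload = serde_json::from_str(r#"{ "name": "Developers" }"#).unwrap();
        let access_all = payload.access_all.unwrap_or(false);
        assert!(!access_all);
        println!("group {} access_all={}", payload.name, access_all);
    }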
@@ -2305,13 +2289,13 @@ async fn post_groups(
     }

     let group_request = data.into_inner().data;
-    let group = group_request.to_group(org_id)?;
+    let group = group_request.to_group(org_id);

     log_event(
         EventType::GroupCreated as i32,
         &group.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2339,7 +2323,7 @@ async fn put_group(
     };

     let group_request = data.into_inner().data;
-    let updated_group = group_request.update_group(group)?;
+    let updated_group = group_request.update_group(group);

     CollectionGroup::delete_all_by_group(group_id, &mut conn).await?;
     GroupUser::delete_all_by_group(group_id, &mut conn).await?;
@@ -2348,7 +2332,7 @@ async fn put_group(
         EventType::GroupUpdated as i32,
         &updated_group.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2381,7 +2365,7 @@ async fn add_update_group(
         EventType::OrganizationUserUpdatedGroups as i32,
         &assigned_user_id,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -2436,7 +2420,7 @@ async fn _delete_group(org_id: &str, group_id: &str, headers: &AdminHeaders, con
         EventType::GroupDeleted as i32,
         &group.uuid,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         conn,
@@ -2527,7 +2511,7 @@ async fn put_group_users(
         EventType::OrganizationUserUpdatedGroups as i32,
         &assigned_user_id,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2605,7 +2589,7 @@ async fn put_user_groups(
         EventType::OrganizationUserUpdatedGroups as i32,
         org_user_id,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2660,7 +2644,7 @@ async fn delete_group_user(
         EventType::OrganizationUserUpdatedGroups as i32,
         org_user_id,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2749,7 +2733,7 @@ async fn put_reset_password(
         EventType::OrganizationUserAdminResetPassword as i32,
         org_user_id,
         org_id,
-        headers.user.uuid.clone(),
+        &headers.user.uuid,
         headers.device.atype,
         &headers.ip.ip,
         &mut conn,
@@ -2876,15 +2860,14 @@ async fn put_reset_password_enrollment(
         EventType::OrganizationUserResetPasswordWithdraw as i32
     };

-    log_event(log_id, org_user_id, org_id, headers.user.uuid.clone(), headers.device.atype, &headers.ip.ip, &mut conn)
-        .await;
+    log_event(log_id, org_user_id, org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(())
 }

 // This is a new function active since the v2022.9.x clients.
 // It combines the previous two calls done before.
-// We call those two functions here and combine them our selfs.
+// We call those two functions here and combine them ourselves.
 //
 // NOTE: It seems clients can't handle uppercase-first keys!!
 // We need to convert all keys so they have the first character to be a lowercase.
@@ -2932,18 +2915,16 @@ async fn get_org_export(org_id: &str, headers: AdminHeaders, mut conn: DbConn) -

 async fn _api_key(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     rotate: bool,
     headers: AdminHeaders,
-    conn: DbConn,
+    mut conn: DbConn,
 ) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;

-    // Validate the admin users password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    // Validate the admin users password/otp
+    data.validate(&user, true, &mut conn).await?;

     let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
         Some(mut org_api_key) => {
@@ -2970,14 +2951,14 @@ async fn _api_key(
 }

 #[post("/organizations/<org_id>/api-key", data = "<data>")]
-async fn api_key(org_id: &str, data: JsonUpcase<PasswordData>, headers: AdminHeaders, conn: DbConn) -> JsonResult {
+async fn api_key(org_id: &str, data: JsonUpcase<PasswordOrOtpData>, headers: AdminHeaders, conn: DbConn) -> JsonResult {
     _api_key(org_id, data, false, headers, conn).await
 }

 #[post("/organizations/<org_id>/rotate-api-key", data = "<data>")]
 async fn rotate_api_key(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: AdminHeaders,
     conn: DbConn,
 ) -> JsonResult {

@@ -56,16 +56,34 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             if let Some(mut user_org) =
                 UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
             {
-                user_org.revoke();
-                user_org.save(&mut conn).await?;
-            }
+                // Only revoke a user if it is not the last confirmed owner
+                let revoked = if user_org.atype == UserOrgType::Owner
+                    && user_org.status == UserOrgStatus::Confirmed as i32
+                {
+                    if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await
+                        <= 1
+                    {
+                        warn!("Can't revoke the last owner");
+                        false
+                    } else {
+                        user_org.revoke()
+                    }
+                } else {
+                    user_org.revoke()
+                };
+
+                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                if revoked || ext_modified {
+                    user_org.save(&mut conn).await?;
+                }
+            }
             // If user is part of the organization, restore it
             } else if let Some(mut user_org) =
                 UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
             {
-                if user_org.status < UserOrgStatus::Revoked as i32 {
-                    user_org.restore();
+                let restored = user_org.restore();
+                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                if restored || ext_modified {
                     user_org.save(&mut conn).await?;
                 }
             } else {
@@ -73,9 +91,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
-                    // doesn't exist in vaultwarden
+                    // User does not exist yet
                     let mut new_user = User::new(user_data.Email.clone());
-                    new_user.set_external_id(Some(user_data.ExternalId.clone()));
                     new_user.save(&mut conn).await?;

                     if !CONFIG.mail_enabled() {
@@ -85,13 +102,14 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                     new_user
                 }
             };
-            let user_org_status = if CONFIG.mail_enabled() {
+            let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
                 UserOrgStatus::Invited as i32
             } else {
                 UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
             };

             let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
+            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
             new_org_user.access_all = false;
             new_org_user.atype = UserOrgType::User as i32;
             new_org_user.status = user_org_status;
@@ -132,12 +150,10 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                 GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

                 for ext_id in &group_data.MemberExternalIds {
-                    if let Some(user) = User::find_by_external_id(ext_id, &mut conn).await {
-                        if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await
-                        {
-                            let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
-                            group_user.save(&mut conn).await?;
-                        }
+                    if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
+                    {
+                        let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
+                        group_user.save(&mut conn).await?;
                     }
                 }
             }
@@ -150,10 +166,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
         // Generate a HashSet to quickly verify if a member is listed or not.
         let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
         for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
-            if let Some(user_external_id) =
-                User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.external_id)
-            {
-                if user_external_id.is_some() && !sync_members.contains(&user_external_id.unwrap()) {
+            if let Some(ref user_external_id) = user_org.external_id {
+                if !sync_members.contains(user_external_id) {
                     if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
                         // Removing owner, check that there is at least one other confirmed owner
                         if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
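The sync code above collects the directory's member ExternalIds into a HashSet so each existing organization member can be checked with a constant-time lookup. A standalone sketch of that membership check (the data here is invented for the example):

    use std::collections::HashSet;

    fn main() {
        // External IDs reported by the directory on this sync run.
        let sync_members: HashSet<String> =
            ["alice-ext", "bob-ext"].iter().map(|s| s.to_string()).collect();

        // Existing members and their stored external_id (None = added manually).
        let existing = vec![("alice", Some("alice-ext")), ("carol", Some("carol-ext")), ("dave", None)];

        for (name, external_id) in existing {
            if let Some(ext) = external_id {
                if !sync_members.contains(ext) {
                    println!("{name} is no longer in the directory and would be removed");
                }
            }
        }
    }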
@@ -1,6 +1,7 @@
 use std::path::Path;

 use chrono::{DateTime, Duration, Utc};
+use num_traits::ToPrimitive;
 use rocket::form::Form;
 use rocket::fs::NamedFile;
 use rocket::fs::TempFile;
@@ -8,17 +9,17 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
+    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
     db::{models::*, DbConn, DbPool},
-    util::SafeString,
+    util::{NumberOrString, SafeString},
     CONFIG,
 };

 const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

 // The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
-const SIZE_525_MB: u64 = 550_502_400;
+const SIZE_525_MB: i64 = 550_502_400;

 pub fn routes() -> Vec<rocket::Route> {
     routes![
@@ -216,30 +217,41 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     } = data.into_inner();
     let model = model.into_inner().data;

+    let Some(size) = data.len().to_i64() else {
+        err!("Invalid send size");
+    };
+    if size < 0 {
+        err!("Send size can't be negative")
+    }
+
     enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;

-    let size_limit = match CONFIG.user_attachment_limit() {
+    let size_limit = match CONFIG.user_send_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
+            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
+                err!("Existing sends overflow")
+            };
+            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
+                err!("Send size overflow");
+            };
             if left <= 0 {
-                err!("Attachment storage limit reached! Delete some attachments to free up space")
+                err!("Send storage limit reached! Delete some sends to free up space")
             }
-            std::cmp::Ord::max(left as u64, SIZE_525_MB)
+            i64::clamp(left, 0, SIZE_525_MB)
         }
         None => SIZE_525_MB,
     };

+    if size > size_limit {
+        err!("Send storage limit exceeded with this file");
+    }
+
     let mut send = create_send(model, headers.user.uuid)?;
     if send.atype != SendType::File as i32 {
         err!("Send content is not a file");
     }

-    let size = data.len();
-    if size > size_limit {
-        err!("Attachment storage limit exceeded with this file");
-    }
-
     let file_id = crate::crypto::generate_send_id();
     let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
     let file_path = folder_path.join(&file_id);
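The new size check above converts the configured limit from KiB to bytes and subtracts the space already used with checked arithmetic, so a huge or misconfigured limit can no longer overflow, and the result is clamped to the per-upload maximum. A runnable sketch of the same calculation (the constant comes from the diff; the helper itself is illustrative):

    const SIZE_525_MB: i64 = 550_502_400;

    // Bytes the user may still upload, or None if the intermediate math would overflow.
    fn remaining_quota(limit_kb: i64, already_used: i64) -> Option<i64> {
        let left = limit_kb.checked_mul(1024)?.checked_sub(already_used)?;
        // Never negative, and never more than a single upload is allowed to be.
        Some(left.clamp(0, SIZE_525_MB))
    }

    fn main() {
        assert_eq!(remaining_quota(1024, 524_288), Some(524_288)); // 1 MiB limit, half used
        assert_eq!(remaining_quota(i64::MAX, 0), None);            // would overflow
        assert_eq!(remaining_quota(10, 1_000_000), Some(0));       // already over the limit
    }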
@@ -253,7 +265,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     if let Some(o) = data_value.as_object_mut() {
         o.insert(String::from("Id"), Value::String(file_id));
         o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
+        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
     }
     send.data = serde_json::to_string(&data_value)?;

@@ -285,24 +297,32 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

     let file_length = match &data.FileLength {
-        Some(m) => Some(m.into_i32()?),
-        _ => None,
+        Some(m) => m.into_i64()?,
+        _ => err!("Invalid send length"),
     };
+    if file_length < 0 {
+        err!("Send size can't be negative")
+    }

-    let size_limit = match CONFIG.user_attachment_limit() {
+    let size_limit = match CONFIG.user_send_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
+            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
+                err!("Existing sends overflow")
+            };
+            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
+                err!("Send size overflow");
+            };
             if left <= 0 {
-                err!("Attachment storage limit reached! Delete some attachments to free up space")
+                err!("Send storage limit reached! Delete some sends to free up space")
             }
-            std::cmp::Ord::max(left as u64, SIZE_525_MB)
+            i64::clamp(left, 0, SIZE_525_MB)
         }
         None => SIZE_525_MB,
     };

-    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
-        err!("Attachment storage limit exceeded with this file");
+    if file_length > size_limit {
+        err!("Send storage limit exceeded with this file");
     }

     let mut send = create_send(data, headers.user.uuid)?;
@@ -312,8 +332,8 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
         o.insert(String::from("Id"), Value::String(file_id.clone()));
-        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
+        o.insert(String::from("Size"), Value::Number(file_length.into()));
+        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length)));
     }
     send.data = serde_json::to_string(&data_value)?;
     send.save(&mut conn).await?;
@@ -340,9 +360,13 @@ async fn post_send_file_v2_data(

     let mut data = data.into_inner();

-    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { err!("Send not found. Unable to save the file.") };
+    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else {
+        err!("Send not found. Unable to save the file.")
+    };

-    let Some(send_user_id) = &send.user_uuid else {err!("Sends are only supported for users at the moment")};
+    let Some(send_user_id) = &send.user_uuid else {
+        err!("Sends are only supported for users at the moment")
+    };
     if send_user_id != &headers.user.uuid {
         err!("Send doesn't belong to user");
     }
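The two bindings above were only reflowed; they use Rust's let-else syntax (stable since 1.65), which binds the pattern on success and must diverge otherwise. A minimal standalone example:

    // `let ... else` binds the pattern or runs the diverging else-block.
    fn first_word(input: &str) -> String {
        let Some(word) = input.split_whitespace().next() else {
            return String::from("<empty>");
        };
        word.to_uppercase()
    }

    fn main() {
        assert_eq!(first_word("hello world"), "HELLO");
        assert_eq!(first_word("   "), "<empty>");
    }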
@@ -5,7 +5,7 @@ use rocket::Route;
 use crate::{
     api::{
         core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
-        NumberOrString, PasswordData,
+        PasswordOrOtpData,
     },
     auth::{ClientIp, Headers},
     crypto,
@@ -13,6 +13,7 @@ use crate::{
         models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
     },
+    util::NumberOrString,
 };

 pub use crate::config::CONFIG;
@@ -22,13 +23,11 @@ pub fn routes() -> Vec<Route> {
 }

 #[post("/two-factor/get-authenticator", data = "<data>")]
-async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let type_ = TwoFactorType::Authenticator as i32;
     let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;
@@ -48,9 +47,10 @@ async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct EnableAuthenticatorData {
-    MasterPasswordHash: String,
     Key: String,
     Token: NumberOrString,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }

 #[post("/two-factor/authenticator", data = "<data>")]
@@ -60,15 +60,17 @@ async fn activate_authenticator(
     mut conn: DbConn,
 ) -> JsonResult {
     let data: EnableAuthenticatorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
     let key = data.Key;
     let token = data.Token.into_string();

     let mut user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Validate key as base32 and 20 bytes length
     let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
@@ -177,7 +179,7 @@ pub async fn validate_totp_code(
         }
     }

-    // Else no valide code received, deny access
+    // Else no valid code received, deny access
     err!(
         format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
         ErrorEvent {
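activate_authenticator keeps validating the submitted secret as base32 that must decode to 20 bytes. A sketch of that check, assuming the data_encoding crate that the BASE32 constant in the hunk above comes from:

    use data_encoding::BASE32;

    // Accept only secrets that are valid base32 and decode to exactly 20 bytes.
    fn validate_totp_secret(key: &str) -> Result<Vec<u8>, String> {
        let decoded = BASE32
            .decode(key.to_uppercase().as_bytes())
            .map_err(|_| "Invalid base32".to_string())?;
        if decoded.len() != 20 {
            return Err("Secret must decode to 20 bytes".to_string());
        }
        Ok(decoded)
    }

    fn main() {
        let key = BASE32.encode(&[0xAB; 20]); // 20 bytes -> 32 base32 characters
        assert!(validate_totp_secret(&key).is_ok());
        assert!(validate_totp_secret("not base32!").is_err());
    }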
@@ -6,7 +6,7 @@ use rocket::Route;
 use crate::{
     api::{
         core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
-        PasswordData,
+        PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
@@ -92,14 +92,13 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

 #[post("/two-factor/get-duo", data = "<data>")]
-async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
+    let user = headers.user;

-    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

-    let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;
+    let data = get_user_duo_data(&user.uuid, &mut conn).await;

     let (enabled, data) = match data {
         DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -129,10 +128,11 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC
 #[derive(Deserialize)]
 #[allow(non_snake_case, dead_code)]
 struct EnableDuoData {
-    MasterPasswordHash: String,
     Host: String,
     SecretKey: String,
     IntegrationKey: String,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }

 impl From<EnableDuoData> for DuoData {
@@ -159,9 +159,12 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
     let data: EnableDuoData = data.into_inner().data;
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash.clone(),
+        Otp: data.Otp.clone(),
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     let (data, data_str) = if check_duo_fields_custom(&data) {
         let data_req: DuoData = data.into();
@@ -5,7 +5,7 @@ use rocket::Route;
|
|||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::{log_user_event, two_factor::_generate_recover_code},
|
core::{log_user_event, two_factor::_generate_recover_code},
|
||||||
EmptyResult, JsonResult, JsonUpcase, PasswordData,
|
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
|
||||||
},
|
},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
crypto,
|
crypto,
|
||||||
@@ -76,13 +76,11 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
|||||||
|
|
||||||
/// When user clicks on Manage email 2FA show the user the related information
|
/// When user clicks on Manage email 2FA show the user the related information
|
||||||
#[post("/two-factor/get-email", data = "<data>")]
|
#[post("/two-factor/get-email", data = "<data>")]
|
||||||
async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, false, &mut conn).await?;
|
||||||
err!("Invalid password");
|
|
||||||
}
|
|
||||||
|
|
||||||
let (enabled, mfa_email) =
|
let (enabled, mfa_email) =
|
||||||
match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
|
match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
|
||||||
@@ -105,7 +103,8 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
|
|||||||
struct SendEmailData {
|
struct SendEmailData {
|
||||||
/// Email where 2FA codes will be sent to, can be different than user email account.
|
/// Email where 2FA codes will be sent to, can be different than user email account.
|
||||||
Email: String,
|
Email: String,
|
||||||
MasterPasswordHash: String,
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
|
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
|
||||||
@@ -114,9 +113,12 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
|
|||||||
let data: SendEmailData = data.into_inner().data;
|
let data: SendEmailData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
PasswordOrOtpData {
|
||||||
err!("Invalid password");
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, false, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if !CONFIG._enable_email_2fa() {
|
if !CONFIG._enable_email_2fa() {
|
||||||
err!("Email 2FA is disabled")
|
err!("Email 2FA is disabled")
|
||||||
@@ -144,8 +146,9 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
|
|||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct EmailData {
|
struct EmailData {
|
||||||
Email: String,
|
Email: String,
|
||||||
MasterPasswordHash: String,
|
|
||||||
Token: String,
|
Token: String,
|
||||||
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Verify email belongs to user and can be used for 2FA email codes.
|
/// Verify email belongs to user and can be used for 2FA email codes.
|
||||||
@@ -154,9 +157,13 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
|
|||||||
let data: EmailData = data.into_inner().data;
|
let data: EmailData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
// This is the last step in the verification process, delete the otp directly afterwards
|
||||||
err!("Invalid password");
|
PasswordOrOtpData {
|
||||||
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, true, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let type_ = TwoFactorType::EmailVerificationChallenge as i32;
|
let type_ = TwoFactorType::EmailVerificationChallenge as i32;
|
||||||
let mut twofactor =
|
let mut twofactor =
|
||||||
|
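For context on the change above: every two-factor endpoint that previously required `MasterPasswordHash` now accepts the shared `PasswordOrOtpData` shape, so a client may authorize the action either with the master password hash or with a previously requested one-time code. A minimal sketch of the two request bodies (illustrative only; field names follow the upcased struct fields shown in this diff, the values are placeholders):

    use serde_json::json;

    fn example_bodies() -> (serde_json::Value, serde_json::Value) {
        // Classic flow: prove knowledge of the master password.
        let with_password = json!({ "MasterPasswordHash": "<client-side password hash>" });
        // New flow: prove possession of the emailed one-time code instead.
        let with_otp = json!({ "Otp": "089675" });
        (with_password, with_otp)
    }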
@@ -5,16 +5,22 @@ use rocket::Route;
 use serde_json::Value;

 use crate::{
-    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
+    api::{
+        core::{log_event, log_user_event},
+        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+    },
     auth::{ClientHeaders, Headers},
     crypto,
     db::{models::*, DbConn, DbPool},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };

 pub mod authenticator;
 pub mod duo;
 pub mod email;
+pub mod protected_actions;
 pub mod webauthn;
 pub mod yubikey;

@@ -33,6 +39,7 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut email::routes());
     routes.append(&mut webauthn::routes());
     routes.append(&mut yubikey::routes());
+    routes.append(&mut protected_actions::routes());

     routes
 }

@@ -50,13 +57,11 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
 }

 #[post("/two-factor/get-recover", data = "<data>")]
-fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, true, &mut conn).await?;

     Ok(Json(json!({
         "Code": user.totp_recover,

@@ -96,6 +101,7 @@ async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeade

     // Remove all twofactors from the user
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &mut conn).await?;

     log_user_event(
         EventType::UserRecovered2fa as i32,

@@ -123,19 +129,23 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct DisableTwoFactorData {
-    MasterPasswordHash: String,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
     Type: NumberOrString,
 }

 #[post("/two-factor/disable", data = "<data>")]
 async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: DisableTwoFactorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
     let user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
+    // Delete directly after a valid token has been provided
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     let type_ = data.Type.into_i32()?;

@@ -145,22 +155,8 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
         .await;
     }

-    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
-
-    if twofactor_disabled {
-        for user_org in
-            UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
-                .await
-                .into_iter()
-        {
-            if user_org.atype < UserOrgType::Admin {
-                if CONFIG.mail_enabled() {
-                    let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
-                    mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
-                }
-                user_org.delete(&mut conn).await?;
-            }
-        }
+    if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
+        enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
     }

     Ok(Json(json!({

@@ -175,6 +171,78 @@ async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers:
     disable_twofactor(data, headers, conn).await
 }

+pub async fn enforce_2fa_policy(
+    user: &User,
+    act_uuid: &str,
+    device_type: i32,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
+        .await
+        .into_iter()
+    {
+        // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
+        if member.atype < UserOrgType::Admin {
+            if CONFIG.mail_enabled() {
+                let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
+                mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
+            }
+            let mut member = member;
+            member.revoke();
+            member.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &member.uuid,
+                &member.org_uuid,
+                act_uuid,
+                device_type,
+                ip,
+                conn,
+            )
+            .await;
+        }
+    }
+
+    Ok(())
+}
+
+pub async fn enforce_2fa_policy_for_org(
+    org_uuid: &str,
+    act_uuid: &str,
+    device_type: i32,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
+    for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
+        // Don't enforce the policy for Admins and Owners.
+        if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
+            if CONFIG.mail_enabled() {
+                let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
+                mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
+            }
+            let mut member = member;
+            member.revoke();
+            member.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &member.uuid,
+                org_uuid,
+                act_uuid,
+                device_type,
+                ip,
+                conn,
+            )
+            .await;
+        }
+    }
+
+    Ok(())
+}
+
 pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     debug!("Sending notifications for incomplete 2FA logins");
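Worth noting about the refactor above: the old inline loop in `disable_twofactor` deleted the offending organization memberships outright, while the extracted `enforce_2fa_policy` revokes them (`member.revoke()` plus save) and records an `OrganizationUserRevoked` event, so the membership can be restored once the user re-enables 2FA. A hedged sketch of how the two helpers are meant to be called (argument names are those of the new functions; the surrounding variables are assumptions for the example):

    // Sketch only: call sites mirroring the ones added in this diff.
    async fn apply_2fa_policies_example(
        user: &User,
        org_uuid: &str,
        device_type: i32,
        ip: &std::net::IpAddr,
        conn: &mut DbConn,
    ) -> EmptyResult {
        // Per-user: run after the user's last two-factor provider has been removed.
        enforce_2fa_policy(user, &user.uuid, device_type, ip, conn).await?;
        // Per-org: run after the TwoFactorAuthentication policy is switched on,
        // sweeping all confirmed members that still have no 2FA configured.
        enforce_2fa_policy_for_org(org_uuid, &user.uuid, device_type, ip, conn).await?;
        Ok(())
    }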
src/api/core/two_factor/protected_actions.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
+use chrono::{Duration, NaiveDateTime, Utc};
+use rocket::Route;
+
+use crate::{
+    api::{EmptyResult, JsonUpcase},
+    auth::Headers,
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::{Error, MapResult},
+    mail, CONFIG,
+};
+
+pub fn routes() -> Vec<Route> {
+    routes![request_otp, verify_otp]
+}
+
+/// Data stored in the TwoFactor table in the db
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ProtectedActionData {
+    /// Token issued to validate the protected action
+    pub token: String,
+    /// UNIX timestamp of token issue.
+    pub token_sent: i64,
+    // The total amount of attempts
+    pub attempts: u8,
+}
+
+impl ProtectedActionData {
+    pub fn new(token: String) -> Self {
+        Self {
+            token,
+            token_sent: Utc::now().naive_utc().timestamp(),
+            attempts: 0,
+        }
+    }
+
+    pub fn to_json(&self) -> String {
+        serde_json::to_string(&self).unwrap()
+    }
+
+    pub fn from_json(string: &str) -> Result<Self, Error> {
+        let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
+        match res {
+            Ok(x) => Ok(x),
+            Err(_) => err!("Could not decode ProtectedActionData from string"),
+        }
+    }
+
+    pub fn add_attempt(&mut self) {
+        self.attempts += 1;
+    }
+}
+
+#[post("/accounts/request-otp")]
+async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() {
+        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
+    }
+
+    let user = headers.user;
+
+    // Only one Protected Action per user is allowed to take place, delete the previous one
+    if let Some(pa) =
+        TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &mut conn).await
+    {
+        pa.delete(&mut conn).await?;
+    }
+
+    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
+    let pa_data = ProtectedActionData::new(generated_token);
+
+    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
+    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::ProtectedActions, pa_data.to_json());
+    twofactor.save(&mut conn).await?;
+
+    mail::send_protected_action_token(&user.email, &pa_data.token).await?;
+
+    Ok(())
+}
+
+#[derive(Deserialize, Serialize, Debug)]
+#[allow(non_snake_case)]
+struct ProtectedActionVerify {
+    OTP: String,
+}
+
+#[post("/accounts/verify-otp", data = "<data>")]
+async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() {
+        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
+    }
+
+    let user = headers.user;
+    let data: ProtectedActionVerify = data.into_inner().data;
+
+    // Delete the token after one validation attempt
+    // This endpoint only gets called for the vault export, and doesn't need a second attempt
+    validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await
+}
+
+pub async fn validate_protected_action_otp(
+    otp: &str,
+    user_uuid: &str,
+    delete_if_valid: bool,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
+        .await
+        .map_res("Protected action token not found, try sending the code again or restart the process")?;
+    let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
+
+    pa_data.add_attempt();
+    // Delete the token after x attempts if it has been used too many times
+    // We use the 6, which should be more then enough for invalid attempts and multiple valid checks
+    if pa_data.attempts > 6 {
+        pa.delete(conn).await?;
+        err!("Token has expired")
+    }
+
+    // Check if the token has expired (Using the email 2fa expiration time)
+    let date =
+        NaiveDateTime::from_timestamp_opt(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.");
+    let max_time = CONFIG.email_expiration_time() as i64;
+    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+        pa.delete(conn).await?;
+        err!("Token has expired")
+    }
+
+    if !crypto::ct_eq(&pa_data.token, otp) {
+        pa.save(conn).await?;
+        err!("Token is invalid")
+    }
+
+    if delete_if_valid {
+        pa.delete(conn).await?;
+    }
+
+    Ok(())
+}
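The token lifecycle in the new file can be exercised in isolation; a small sketch using only the type defined above (illustrative, not part of the file; the token value is an assumption):

    fn protected_action_roundtrip_example() {
        // The stored payload survives a JSON round-trip and counts attempts.
        let mut pa = ProtectedActionData::new("089675".to_string());
        pa.add_attempt();
        assert_eq!(pa.attempts, 1);

        let stored = pa.to_json(); // this string is what gets saved in the twofactor row
        let restored = ProtectedActionData::from_json(&stored).expect("decodes");
        assert_eq!(restored.token, "089675");
    }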
@@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
     },
     auth::Headers,
     db::{

@@ -15,6 +15,7 @@ use crate::{
         DbConn,
     },
     error::Error,
+    util::NumberOrString,
     CONFIG,
 };

@@ -103,16 +104,17 @@ impl WebauthnRegistration {
 }

 #[post("/two-factor/get-webauthn", data = "<data>")]
-async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }

-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    let data: PasswordOrOtpData = data.into_inner().data;
+    let user = headers.user;

-    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?;
+    data.validate(&user, false, &mut conn).await?;
+
+    let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &mut conn).await?;
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({

@@ -123,12 +125,17 @@ async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn
 }

 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+async fn generate_webauthn_challenge(
+    data: JsonUpcase<PasswordOrOtpData>,
+    headers: Headers,
+    mut conn: DbConn,
+) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
+    let user = headers.user;

-    let registrations = get_webauthn_registrations(&headers.user.uuid, &mut conn)
+    data.validate(&user, false, &mut conn).await?;
+
+    let registrations = get_webauthn_registrations(&user.uuid, &mut conn)
         .await?
         .1
         .into_iter()

@@ -136,16 +143,16 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
         .collect();

     let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
-        headers.user.uuid.as_bytes().to_vec(),
-        headers.user.email,
-        headers.user.name,
+        user.uuid.as_bytes().to_vec(),
+        user.email,
+        user.name,
         Some(registrations),
         None,
         None,
     )?;

     let type_ = TwoFactorType::WebauthnRegisterChallenge;
-    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
+    TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;

     let mut challenge_value = serde_json::to_value(challenge.public_key)?;
     challenge_value["status"] = "ok".into();

@@ -158,8 +165,9 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
 struct EnableWebauthnData {
     Id: NumberOrString, // 1..5
     Name: String,
-    MasterPasswordHash: String,
     DeviceResponse: RegisterPublicKeyCredentialCopy,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }

 // This is copied from RegisterPublicKeyCredential to change the Response objects casing

@@ -246,9 +254,12 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
     let data: EnableWebauthnData = data.into_inner().data;
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Retrieve and delete the saved challenge state
     let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
@@ -6,7 +6,7 @@ use yubico::{config::Config, verify};
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
     },
     auth::Headers,
     db::{

@@ -24,13 +24,14 @@ pub fn routes() -> Vec<Route> {
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct EnableYubikeyData {
-    MasterPasswordHash: String,
     Key1: Option<String>,
     Key2: Option<String>,
     Key3: Option<String>,
     Key4: Option<String>,
     Key5: Option<String>,
     Nfc: bool,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }

 #[derive(Deserialize, Serialize, Debug)]

@@ -83,16 +84,14 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
 }

 #[post("/two-factor/get-yubikey", data = "<data>")]
-async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Make sure the credentials are set
     get_yubico_credentials()?;

-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let user_uuid = &user.uuid;
     let yubikey_type = TwoFactorType::YubiKey as i32;

@@ -122,9 +121,12 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     let data: EnableYubikeyData = data.into_inner().data;
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash.clone(),
+        Otp: data.Otp.clone(),
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Check if we already have some data
     let mut yubikey_data =
src/api/icons.rs (252 changed lines)
@@ -19,7 +19,7 @@ use tokio::{
     net::lookup_host,
 };

-use html5gum::{Emitter, EndTag, HtmlString, InfallibleTokenizer, Readable, StartTag, StringReader, Tokenizer};
+use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};

 use crate::{
     error::Error,

@@ -46,10 +46,15 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
     // Generate the cookie store
     let cookie_store = Arc::new(Jar::default());

+    let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
+    let pool_idle_timeout = Duration::from_secs(10);
     // Reuse the client between requests
     let client = get_reqwest_client_builder()
         .cookie_provider(Arc::clone(&cookie_store))
-        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
+        .timeout(icon_download_timeout)
+        .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
+        .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
+        .trust_dns(true)
         .default_headers(default_headers.clone());

     match client.build() {

@@ -58,9 +63,11 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
             error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
             get_reqwest_client_builder()
                 .cookie_provider(cookie_store)
-                .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
-                .default_headers(default_headers)
+                .timeout(icon_download_timeout)
+                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
+                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
                 .trust_dns(false)
+                .default_headers(default_headers)
                 .build()
                 .expect("Failed to build client")
         }

@@ -258,7 +265,7 @@ mod tests {
     }
 }

-#[derive(Debug, Clone)]
+#[derive(Clone)]
 enum DomainBlacklistReason {
     Regex,
     IP,

@@ -415,38 +422,34 @@ fn get_favicons_node(
     const TAG_LINK: &[u8] = b"link";
     const TAG_BASE: &[u8] = b"base";
     const TAG_HEAD: &[u8] = b"head";
-    const ATTR_REL: &[u8] = b"rel";
     const ATTR_HREF: &[u8] = b"href";
     const ATTR_SIZES: &[u8] = b"sizes";

     let mut base_url = url.clone();
-    let mut icon_tags: Vec<StartTag> = Vec::new();
+    let mut icon_tags: Vec<Tag> = Vec::new();
     for token in dom {
-        match token {
-            FaviconToken::StartTag(tag) => {
-                if *tag.name == TAG_LINK
-                    && tag.attributes.contains_key(ATTR_REL)
-                    && tag.attributes.contains_key(ATTR_HREF)
-                {
-                    let rel_value = std::str::from_utf8(tag.attributes.get(ATTR_REL).unwrap())
-                        .unwrap_or_default()
-                        .to_ascii_lowercase();
-                    if rel_value.contains("icon") && !rel_value.contains("mask-icon") {
-                        icon_tags.push(tag);
-                    }
-                } else if *tag.name == TAG_BASE && tag.attributes.contains_key(ATTR_HREF) {
-                    let href = std::str::from_utf8(tag.attributes.get(ATTR_HREF).unwrap()).unwrap_or_default();
-                    debug!("Found base href: {href}");
-                    base_url = match base_url.join(href) {
-                        Ok(inner_url) => inner_url,
-                        _ => url.clone(),
-                    };
-                }
+        let tag_name: &[u8] = &token.tag.name;
+        match tag_name {
+            TAG_LINK => {
+                icon_tags.push(token.tag);
             }
-            FaviconToken::EndTag(tag) => {
-                if *tag.name == TAG_HEAD {
-                    break;
-                }
+            TAG_BASE => {
+                base_url = if let Some(href) = token.tag.attributes.get(ATTR_HREF) {
+                    let href = std::str::from_utf8(href).unwrap_or_default();
+                    debug!("Found base href: {href}");
+                    match base_url.join(href) {
+                        Ok(inner_url) => inner_url,
+                        _ => continue,
+                    }
+                } else {
+                    continue;
+                };
+            }
+            TAG_HEAD if token.closing => {
+                break;
+            }
+            _ => {
+                continue;
             }
         }
     }
@@ -531,11 +534,11 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let mut referer = String::new();

     if let Ok(content) = resp {
-        // Extract the URL from the respose in case redirects occured (like @ gitlab.com)
+        // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
         let url = content.url().clone();

         // Set the referer to be used on the final request, some sites check this.
-        // Mostly used to prevent direct linking and other security resons.
+        // Mostly used to prevent direct linking and other security reasons.
         referer = url.to_string();

         // Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from.

@@ -635,7 +638,7 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
     }
 }

-/// Returns a Tuple with the width and hight as a seperate value extracted from the sizes attribute
+/// Returns a Tuple with the width and height as a separate value extracted from the sizes attribute
 /// It will return 0 for both values if no match has been found.
 ///
 /// # Arguments

@@ -682,7 +685,9 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {

     for icon in icon_result.iconlist.iter().take(5) {
         if icon.href.starts_with("data:image") {
-            let Ok(datauri) = DataUrl::process(&icon.href) else {continue};
+            let Ok(datauri) = DataUrl::process(&icon.href) else {
+                continue;
+            };
             // Check if we are able to decode the data uri
             let mut body = BytesMut::new();
             match datauri.decode::<_, ()>(|bytes| {

@@ -820,43 +825,64 @@ impl reqwest::cookie::CookieStore for Jar {
 }

 /// Custom FaviconEmitter for the html5gum parser.
-/// The FaviconEmitter is using an almost 1:1 copy of the DefaultEmitter with some small changes.
+/// The FaviconEmitter is using an optimized version of the DefaultEmitter.
 /// This prevents emitting tags like comments, doctype and also strings between the tags.
+/// But it will also only emit the tags we need and only if they have the correct attributes
 /// Therefor parsing the HTML content is faster.
-use std::collections::{BTreeSet, VecDeque};
+use std::collections::BTreeMap;

-#[derive(Debug)]
-enum FaviconToken {
-    StartTag(StartTag),
-    EndTag(EndTag),
+#[derive(Default)]
+pub struct Tag {
+    /// The tag's name, such as `"link"` or `"base"`.
+    pub name: HtmlString,
+
+    /// A mapping for any HTML attributes this start tag may have.
+    ///
+    /// Duplicate attributes are ignored after the first one as per WHATWG spec.
+    pub attributes: BTreeMap<HtmlString, HtmlString>,
 }

-#[derive(Default, Debug)]
+struct FaviconToken {
+    tag: Tag,
+    closing: bool,
+}
+
+#[derive(Default)]
 struct FaviconEmitter {
     current_token: Option<FaviconToken>,
     last_start_tag: HtmlString,
     current_attribute: Option<(HtmlString, HtmlString)>,
-    seen_attributes: BTreeSet<HtmlString>,
-    emitted_tokens: VecDeque<FaviconToken>,
+    emit_token: bool,
 }

 impl FaviconEmitter {
-    fn emit_token(&mut self, token: FaviconToken) {
-        self.emitted_tokens.push_front(token);
-    }
-
-    fn flush_current_attribute(&mut self) {
-        if let Some((k, v)) = self.current_attribute.take() {
-            match self.current_token {
-                Some(FaviconToken::StartTag(ref mut tag)) => {
-                    tag.attributes.entry(k).and_modify(|_| {}).or_insert(v);
-                }
-                Some(FaviconToken::EndTag(_)) => {
-                    self.seen_attributes.insert(k);
-                }
-                _ => {
-                    debug_assert!(false);
+    fn flush_current_attribute(&mut self, emit_current_tag: bool) {
+        const ATTR_HREF: &[u8] = b"href";
+        const ATTR_REL: &[u8] = b"rel";
+        const TAG_LINK: &[u8] = b"link";
+        const TAG_BASE: &[u8] = b"base";
+        const TAG_HEAD: &[u8] = b"head";
+
+        if let Some(ref mut token) = self.current_token {
+            let tag_name: &[u8] = &token.tag.name;
+            if self.current_attribute.is_some() && (tag_name == TAG_BASE || tag_name == TAG_LINK) {
+                let (k, v) = self.current_attribute.take().unwrap();
+                token.tag.attributes.entry(k).and_modify(|_| {}).or_insert(v);
+            }
+
+            let tag_attr = &token.tag.attributes;
+            match tag_name {
+                TAG_HEAD if token.closing => self.emit_token = true,
+                TAG_BASE if tag_attr.contains_key(ATTR_HREF) => self.emit_token = true,
+                TAG_LINK if emit_current_tag && tag_attr.contains_key(ATTR_REL) && tag_attr.contains_key(ATTR_HREF) => {
+                    let rel_value =
+                        std::str::from_utf8(token.tag.attributes.get(ATTR_REL).unwrap()).unwrap_or_default();
+                    if rel_value.contains("icon") && !rel_value.contains("mask-icon") {
+                        self.emit_token = true
+                    }
                 }
+                _ => (),
             }
         }
     }
 }
@@ -871,87 +897,71 @@ impl Emitter for FaviconEmitter {
     }

     fn pop_token(&mut self) -> Option<Self::Token> {
-        self.emitted_tokens.pop_back()
-    }
-
-    fn init_start_tag(&mut self) {
-        self.current_token = Some(FaviconToken::StartTag(StartTag::default()));
-    }
-
-    fn init_end_tag(&mut self) {
-        self.current_token = Some(FaviconToken::EndTag(EndTag::default()));
-        self.seen_attributes.clear();
-    }
-
-    fn emit_current_tag(&mut self) -> Option<html5gum::State> {
-        self.flush_current_attribute();
-        let mut token = self.current_token.take().unwrap();
-        let mut emit = false;
-        match token {
-            FaviconToken::EndTag(ref mut tag) => {
-                // Always clean seen attributes
-                self.seen_attributes.clear();
-                self.set_last_start_tag(None);
-
-                // Only trigger an emit for the </head> tag.
-                // This is matched, and will break the for-loop.
-                if *tag.name == b"head" {
-                    emit = true;
-                }
-            }
-            FaviconToken::StartTag(ref mut tag) => {
-                // Only trriger an emit for <link> and <base> tags.
-                // These are the only tags we want to parse.
-                if *tag.name == b"link" || *tag.name == b"base" {
-                    self.set_last_start_tag(Some(&tag.name));
-                    emit = true;
-                } else {
-                    self.set_last_start_tag(None);
-                }
-            }
-        }
-
-        // Only emit the tags we want to parse.
-        if emit {
-            self.emit_token(token);
+        if self.emit_token {
+            self.emit_token = false;
+            return self.current_token.take();
         }
         None
     }

+    fn init_start_tag(&mut self) {
+        self.current_token = Some(FaviconToken {
+            tag: Tag::default(),
+            closing: false,
+        });
+    }
+
+    fn init_end_tag(&mut self) {
+        self.current_token = Some(FaviconToken {
+            tag: Tag::default(),
+            closing: true,
+        });
+    }
+
+    fn emit_current_tag(&mut self) -> Option<html5gum::State> {
+        self.flush_current_attribute(true);
+        self.last_start_tag.clear();
+        if self.current_token.is_some() && !self.current_token.as_ref().unwrap().closing {
+            self.last_start_tag.extend(&*self.current_token.as_ref().unwrap().tag.name);
+        }
+        html5gum::naive_next_state(&self.last_start_tag)
+    }
+
     fn push_tag_name(&mut self, s: &[u8]) {
-        match self.current_token {
-            Some(
-                FaviconToken::StartTag(StartTag {
-                    ref mut name,
-                    ..
-                })
-                | FaviconToken::EndTag(EndTag {
-                    ref mut name,
-                    ..
-                }),
-            ) => {
-                name.extend(s);
-            }
-            _ => debug_assert!(false),
+        if let Some(ref mut token) = self.current_token {
+            token.tag.name.extend(s);
         }
     }

     fn init_attribute(&mut self) {
-        self.flush_current_attribute();
-        self.current_attribute = Some(Default::default());
+        self.flush_current_attribute(false);
+        self.current_attribute = match &self.current_token {
+            Some(token) => {
+                let tag_name: &[u8] = &token.tag.name;
+                match tag_name {
+                    b"link" | b"head" | b"base" => Some(Default::default()),
+                    _ => None,
+                }
+            }
+            _ => None,
+        };
     }

     fn push_attribute_name(&mut self, s: &[u8]) {
-        self.current_attribute.as_mut().unwrap().0.extend(s);
+        if let Some(attr) = &mut self.current_attribute {
+            attr.0.extend(s)
+        }
     }

     fn push_attribute_value(&mut self, s: &[u8]) {
-        self.current_attribute.as_mut().unwrap().1.extend(s);
+        if let Some(attr) = &mut self.current_attribute {
+            attr.1.extend(s)
+        }
     }

     fn current_is_appropriate_end_tag_token(&mut self) -> bool {
-        match self.current_token {
-            Some(FaviconToken::EndTag(ref tag)) => !self.last_start_tag.is_empty() && self.last_start_tag == tag.name,
+        match &self.current_token {
+            Some(token) if token.closing => !self.last_start_tag.is_empty() && self.last_start_tag == token.tag.name,
             _ => false,
         }
     }
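As the doc comments above explain, the rewritten emitter now filters while tokenizing: only `<link>` tags whose `rel` names an icon, `<base href=...>` tags, and the closing `</head>` tag are ever handed to `get_favicons_node`. The `rel` predicate itself is small enough to show standalone (a sketch mirroring the check in `flush_current_attribute`, not an exact excerpt):

    // Accept any rel containing "icon" ("icon", "shortcut icon", "apple-touch-icon", ...)
    // but skip Safari's "mask-icon", which is usually a monochrome SVG pin icon.
    fn wanted_rel(rel_value: &str) -> bool {
        rel_value.contains("icon") && !rel_value.contains("mask-icon")
    }

    fn rel_filter_examples() {
        assert!(wanted_rel("icon"));
        assert!(wanted_rel("apple-touch-icon"));
        assert!(!wanted_rel("mask-icon"));
    }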
@@ -9,9 +9,12 @@ use serde_json::Value;

 use crate::{
     api::{
-        core::accounts::{PreloginData, RegisterData, _prelogin, _register},
-        core::log_user_event,
-        core::two_factor::{duo, email, email::EmailTokenData, yubikey},
+        core::{
+            accounts::{PreloginData, RegisterData, _prelogin, _register},
+            log_user_event,
+            two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
+        },
+        push::register_push_device,
         ApiResult, EmptyResult, JsonResult, JsonUpcase,
     },
     auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},

@@ -103,8 +106,13 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {

     // Common
     let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;

     let result = json!({

@@ -155,7 +163,27 @@ async fn _password_login(

     // Check password
     let password = data.password.as_ref().unwrap();
-    if !user.check_valid_password(password) {
+    if let Some(auth_request_uuid) = data.auth_request.clone() {
+        if let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await {
+            if !auth_request.check_access_code(password) {
+                err!(
+                    "Username or access code is incorrect. Try again",
+                    format!("IP: {}. Username: {}.", ip.ip, username),
+                    ErrorEvent {
+                        event: EventType::UserFailedLogIn,
+                    }
+                )
+            }
+        } else {
+            err!(
+                "Auth request not found. Try again.",
+                format!("IP: {}. Username: {}.", ip.ip, username),
+                ErrorEvent {
+                    event: EventType::UserFailedLogIn,
+                }
+            )
+        }
+    } else if !user.check_valid_password(password) {
         err!(
             "Username or password is incorrect. Try again",
             format!("IP: {}. Username: {}.", ip.ip, username),

@@ -222,7 +250,7 @@ async fn _password_login(

     let (mut device, new_device) = get_device(&data, conn, &user).await;

-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
+    let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;

     if CONFIG.mail_enabled() && new_device {
         if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {

@@ -239,9 +267,17 @@ async fn _password_login(
         }
     }

+    // register push device
+    register_push_device(&mut device, conn).await?;
+
     // Common
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;

     let mut result = json!({

@@ -260,6 +296,10 @@ async fn _password_login(
         "ResetMasterPassword": false,// TODO: Same as above
         "scope": scope,
         "unofficialServer": true,
+        "UserDecryptionOptions": {
+            "HasMasterPassword": !user.password_hash.is_empty(),
+            "Object": "userDecryptionOptions"
+        },
     });

     if let Some(token) = twofactor_token {

@@ -350,8 +390,13 @@ async fn _user_api_key_login(

     // Common
     let scope_vec = vec!["api".into()];
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;

     info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
@@ -429,32 +474,32 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi
 }

 async fn twofactor_auth(
-    user_uuid: &str,
+    user: &User,
     data: &ConnectData,
     device: &mut Device,
     ip: &ClientIp,
     conn: &mut DbConn,
 ) -> ApiResult<Option<String>> {
-    let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;
+    let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;

     // No twofactor token if twofactor is disabled
     if twofactors.is_empty() {
+        enforce_2fa_policy(user, &user.uuid, device.atype, &ip.ip, conn).await?;
         return Ok(None);
     }

-    TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;
+    TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, ip, conn).await?;

     let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
-    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one
+    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one

     let twofactor_code = match data.two_factor_token {
         Some(ref code) => code,
-        None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"),
+        None => err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?, "2FA token not provided"),
     };

     let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);

-    use crate::api::core::two_factor as _tf;
     use crate::crypto::ct_eq;

     let selected_data = _selected_data(selected_twofactor);

@@ -462,17 +507,15 @@ async fn twofactor_auth(

     match TwoFactorType::from_i32(selected_id) {
         Some(TwoFactorType::Authenticator) => {
-            _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await?
+            authenticator::validate_totp_code_str(&user.uuid, twofactor_code, &selected_data?, ip, conn).await?
         }
-        Some(TwoFactorType::Webauthn) => {
-            _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
-        }
-        Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
+        Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
+        Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
         Some(TwoFactorType::Duo) => {
-            _tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
+            duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
         }
         Some(TwoFactorType::Email) => {
-            _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await?
+            email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
         }

         Some(TwoFactorType::Remember) => {

@@ -482,7 +525,7 @@ async fn twofactor_auth(
         }
         _ => {
             err_json!(
-                _json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
+                _json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?,
                 "2FA Remember token not provided"
             )
         }

@@ -496,7 +539,7 @@ async fn twofactor_auth(
         ),
     }

-    TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;
+    TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;

     if !CONFIG.disable_2fa_remember() && remember == 1 {
         Ok(Some(device.refresh_twofactor_remember()))

@@ -511,8 +554,6 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
 }

 async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
-    use crate::api::core::two_factor;
-
     let mut result = json!({
         "error" : "invalid_grant",
         "error_description" : "Two factor required.",

@@ -527,7 +568,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
             Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }

             Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
-                let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
+                let request = webauthn::generate_webauthn_login(user_uuid, conn).await?;
                 result["TwoFactorProviders2"][provider.to_string()] = request.0;
             }

@@ -559,8 +600,6 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
             }

             Some(tf_type @ TwoFactorType::Email) => {
-                use crate::api::core::two_factor as _tf;
-
                 let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
                     Some(tf) => tf,
                     None => err!("No twofactor email registered"),

@@ -568,10 +607,10 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo

                 // Send email immediately if email is the only 2FA option
                 if providers.len() == 1 {
-                    _tf::email::send_token(user_uuid, conn).await?
+                    email::send_token(user_uuid, conn).await?
                 }

-                let email_data = EmailTokenData::from_json(&twofactor.data)?;
+                let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
                 result["TwoFactorProviders2"][provider.to_string()] = json!({
                     "Email": email::obscure_email(&email_data.email),
                 })

@@ -646,6 +685,8 @@ struct ConnectData {
     #[field(name = uncased("two_factor_remember"))]
     #[field(name = uncased("twofactorremember"))]
     two_factor_remember: Option<i32>,
+    #[field(name = uncased("authrequest"))]
+    auth_request: Option<String>,
 }

 fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
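The branch added to `_password_login` means the `password` form field does double duty: on a normal login it carries the master password hash, while on a login-with-device flow (when an `authrequest` id is supplied) it carries the auth-request access code. A hypothetical, simplified rendering of that decision, with the two checks passed in as closures (the real code also records `ErrorEvent`s and handles a missing `AuthRequest`):

    fn secret_is_valid<P, A>(sent_secret: &str, check_master_password: P, check_access_code: Option<A>) -> bool
    where
        P: Fn(&str) -> bool,
        A: Fn(&str) -> bool,
    {
        match check_access_code {
            Some(check) => check(sent_secret),            // login-with-device: access code
            None => check_master_password(sent_secret),   // classic flow: master password hash
        }
    }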
@@ -13,6 +13,7 @@ pub use crate::api::{
|
|||||||
admin::catchers as admin_catchers,
|
admin::catchers as admin_catchers,
|
||||||
admin::routes as admin_routes,
|
admin::routes as admin_routes,
|
||||||
core::catchers as core_catchers,
|
core::catchers as core_catchers,
|
||||||
|
core::purge_auth_requests,
|
||||||
core::purge_sends,
|
core::purge_sends,
|
||||||
core::purge_trashed_ciphers,
|
core::purge_trashed_ciphers,
|
||||||
core::routes as core_routes,
|
core::routes as core_routes,
|
||||||
@@ -22,7 +23,7 @@ pub use crate::api::{
|
|||||||
icons::routes as icons_routes,
|
icons::routes as icons_routes,
|
||||||
identity::routes as identity_routes,
|
identity::routes as identity_routes,
|
||||||
notifications::routes as notifications_routes,
|
notifications::routes as notifications_routes,
|
||||||
notifications::{start_notification_server, Notify, UpdateType},
|
notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
|
||||||
push::{
|
push::{
|
||||||
push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
|
push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
|
||||||
unregister_push_device,
|
unregister_push_device,
|
||||||
@@ -31,6 +32,7 @@ pub use crate::api::{
|
|||||||
web::routes as web_routes,
|
web::routes as web_routes,
|
||||||
web::static_files,
|
web::static_files,
|
||||||
};
|
};
|
||||||
|
use crate::db::{models::User, DbConn};
|
||||||
use crate::util;
|
use crate::util;
|
||||||
|
|
||||||
// Type aliases for API methods results
|
// Type aliases for API methods results
|
||||||
@@ -45,33 +47,29 @@ type JsonVec<T> = Json<Vec<T>>;
 // Common structs representing JSON data received
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-struct PasswordData {
-    MasterPasswordHash: String,
+struct PasswordOrOtpData {
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }

-#[derive(Deserialize, Debug, Clone)]
-#[serde(untagged)]
-enum NumberOrString {
-    Number(i32),
-    String(String),
-}
+impl PasswordOrOtpData {
+    /// Tokens used via this struct can be used multiple times during the process
+    /// First for the validation to continue, after that to enable or validate the following actions
+    /// This is different per caller, so it can be adjusted to delete the token or not
+    pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
+        use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;

-impl NumberOrString {
-    fn into_string(self) -> String {
-        match self {
-            NumberOrString::Number(n) => n.to_string(),
-            NumberOrString::String(s) => s,
-        }
-    }
-
-    #[allow(clippy::wrong_self_convention)]
-    fn into_i32(&self) -> ApiResult<i32> {
-        use std::num::ParseIntError as PIE;
-        match self {
-            NumberOrString::Number(n) => Ok(*n),
-            NumberOrString::String(s) => {
-                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
+        match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
+            (Some(pw_hash), None) => {
+                if !user.check_valid_password(pw_hash) {
+                    err!("Invalid password");
+                }
             }
+            (None, Some(otp)) => {
+                validate_protected_action_otp(otp, &user.uuid, delete_if_valid, conn).await?;
+            }
+            _ => err!("No validation provided"),
         }
+        Ok(())
     }
 }
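For orientation, the sketch below shows how a route handler would be expected to call the new validate() helper. It is illustrative only and not part of the diff: the endpoint path and handler name are made up, while JsonUpcase, Headers, DbConn and EmptyResult are the crate's existing types.

// Illustrative only: a hypothetical protected endpoint calling PasswordOrOtpData::validate().
#[post("/accounts/example-protected-action", data = "<data>")]
async fn example_protected_action(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: PasswordOrOtpData = data.into_inner().data;

    // Passing `true` deletes a matching protected-action OTP once it validates,
    // so the same token cannot be replayed for a second call.
    data.validate(&headers.user, true, &mut conn).await?;

    // ...perform the actual protected action here...
    Ok(())
}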
@@ -20,7 +20,7 @@ use tokio_tungstenite::{
 };

 use crate::{
-    auth::ClientIp,
+    auth::{ClientIp, WsAccessTokenHeader},
     db::{
         models::{Cipher, Folder, Send as DbSend, User},
         DbConn,
@@ -36,10 +36,19 @@ static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
     })
 });

-use super::{push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update};
+pub static WS_ANONYMOUS_SUBSCRIPTIONS: Lazy<Arc<AnonymousWebSocketSubscriptions>> = Lazy::new(|| {
+    Arc::new(AnonymousWebSocketSubscriptions {
+        map: Arc::new(dashmap::DashMap::new()),
+    })
+});
+
+use super::{
+    push::push_auth_request, push::push_auth_response, push_cipher_update, push_folder_update, push_logout,
+    push_send_update, push_user_update,
+};

 pub fn routes() -> Vec<Route> {
-    routes![websockets_hub]
+    routes![websockets_hub, anonymous_websockets_hub]
 }

 #[derive(FromForm, Debug)]
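The new WS_ANONYMOUS_SUBSCRIPTIONS static is a token-keyed registry of channel senders. The standalone sketch below demonstrates the same pattern outside Vaultwarden; the names (SUBSCRIPTIONS, Registration, "demo-token") and the dependency versions are assumptions for the example only.

// Illustrative, standalone sketch of the registry pattern introduced above:
// a global DashMap from token -> mpsc sender, plus a guard that unregisters on drop.
// Cargo deps assumed: dashmap = "5", once_cell = "1", tokio = { version = "1", features = ["full"] }.
use std::sync::Arc;

use dashmap::DashMap;
use once_cell::sync::Lazy;
use tokio::sync::mpsc::{channel, Sender};

static SUBSCRIPTIONS: Lazy<Arc<DashMap<String, Sender<String>>>> = Lazy::new(|| Arc::new(DashMap::new()));

struct Registration(String);

impl Drop for Registration {
    fn drop(&mut self) {
        // Mirrors WSAnonymousEntryMapGuard: forget the sender once the "connection" ends.
        SUBSCRIPTIONS.remove(&self.0);
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = channel::<String>(16);
    SUBSCRIPTIONS.insert("demo-token".into(), tx);
    let _guard = Registration("demo-token".into());

    // Any task that can reach the map can now push a message to this subscriber by token.
    if let Some(sender) = SUBSCRIPTIONS.get("demo-token").map(|v| v.clone()) {
        sender.send("AuthRequestResponse".into()).await.unwrap();
    }

    println!("received: {}", rx.recv().await.unwrap());
}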
@@ -74,17 +83,50 @@ impl Drop for WSEntryMapGuard {
     }
 }

+struct WSAnonymousEntryMapGuard {
+    subscriptions: Arc<AnonymousWebSocketSubscriptions>,
+    token: String,
+    addr: IpAddr,
+}
+
+impl WSAnonymousEntryMapGuard {
+    fn new(subscriptions: Arc<AnonymousWebSocketSubscriptions>, token: String, addr: IpAddr) -> Self {
+        Self {
+            subscriptions,
+            token,
+            addr,
+        }
+    }
+}
+
+impl Drop for WSAnonymousEntryMapGuard {
+    fn drop(&mut self) {
+        info!("Closing WS connection from {}", self.addr);
+        self.subscriptions.map.remove(&self.token);
+    }
+}
+
 #[get("/hub?<data..>")]
 fn websockets_hub<'r>(
     ws: rocket_ws::WebSocket,
     data: WsAccessToken,
     ip: ClientIp,
+    header_token: WsAccessTokenHeader,
 ) -> Result<rocket_ws::Stream!['r], Error> {
     let addr = ip.ip;
     info!("Accepting Rocket WS connection from {addr}");

-    let Some(token) = data.access_token else { err_code!("Invalid claim", 401) };
-    let Ok(claims) = crate::auth::decode_login(&token) else { err_code!("Invalid token", 401) };
+    let token = if let Some(token) = data.access_token {
+        token
+    } else if let Some(token) = header_token.access_token {
+        token
+    } else {
+        err_code!("Invalid claim", 401)
+    };
+
+    let Ok(claims) = crate::auth::decode_login(&token) else {
+        err_code!("Invalid token", 401)
+    };

     let (mut rx, guard) = {
         let users = Arc::clone(&WS_USERS);
@@ -122,6 +164,82 @@ fn websockets_hub<'r>(
                                             continue;
                                         }
                                     }
+
+                                    // Prevent sending anything back when a `Close` Message is received.
+                                    // Just break the loop
+                                    Message::Close(_) => break,
+
+                                    // Just echo anything else the client sends
+                                    _ => yield message,
+                                }
+                            }
+                            _ => break,
+                        }
+                    }
+
+                    res = rx.recv() => {
+                        match res {
+                            Some(res) => yield res,
+                            None => break,
+                        }
+                    }
+
+                    _ = interval.tick() => yield Message::Ping(create_ping())
+                }
+            }
+        }}
+    })
+}
+
+#[get("/anonymous-hub?<token..>")]
+fn anonymous_websockets_hub<'r>(
+    ws: rocket_ws::WebSocket,
+    token: String,
+    ip: ClientIp,
+) -> Result<rocket_ws::Stream!['r], Error> {
+    let addr = ip.ip;
+    info!("Accepting Anonymous Rocket WS connection from {addr}");
+
+    let (mut rx, guard) = {
+        let subscriptions = Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS);
+
+        // Add a channel to send messages to this client to the map
+        let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
+        subscriptions.map.insert(token.clone(), tx);
+
+        // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
+        (rx, WSAnonymousEntryMapGuard::new(subscriptions, token, addr))
+    };
+
+    Ok({
+        rocket_ws::Stream! { ws => {
+            let mut ws = ws;
+            let _guard = guard;
+            let mut interval = tokio::time::interval(Duration::from_secs(15));
+            loop {
+                tokio::select! {
+                    res = ws.next() => {
+                        match res {
+                            Some(Ok(message)) => {
+                                match message {
+                                    // Respond to any pings
+                                    Message::Ping(ping) => yield Message::Pong(ping),
+                                    Message::Pong(_) => {/* Ignored */},
+
+                                    // We should receive an initial message with the protocol and version, and we will reply to it
+                                    Message::Text(ref message) => {
+                                        let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
+
+                                        if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
+                                            yield Message::binary(INITIAL_RESPONSE);
+                                            continue;
+                                        }
+                                    }
+
+                                    // Prevent sending anything back when a `Close` Message is received.
+                                    // Just break the loop
+                                    Message::Close(_) => break,
+
                                     // Just echo anything else the client sends
                                     _ => yield message,
                                 }
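For context, the sketch below shows how a waiting client might attach to the new /anonymous-hub endpoint from outside the server. It is not part of the diff: the host, port, mount path and token are placeholders, and the handshake body is assumed to match the INITIAL_MESSAGE / RECORD_SEPARATOR convention the server code above checks for (a SignalR-style "messagepack" handshake terminated by 0x1e).

// Illustrative client sketch. Cargo deps assumed: tokio = { version = "1", features = ["full"] },
// tokio-tungstenite = "0.20", futures-util = "0.3".
use futures_util::{SinkExt, StreamExt};
use tokio_tungstenite::{connect_async, tungstenite::Message};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Path assumed from how the notification routes are mounted; the token would be the auth request id.
    let url = "ws://localhost:8000/notifications/anonymous-hub?token=some-auth-request-id";
    let (mut ws, _response) = connect_async(url).await?;

    // Handshake: JSON object terminated by the 0x1e record separator.
    ws.send(Message::Text(format!("{}\u{1e}", r#"{"protocol":"messagepack","version":1}"#))).await?;

    // The first binary frame is assumed to be the handshake acknowledgement; later binary
    // frames carry the MessagePack-encoded notifications (e.g. AuthRequestResponseRecieved).
    let mut got_handshake_ack = false;
    while let Some(frame) = ws.next().await {
        if let Message::Binary(data) = frame? {
            if !got_handshake_ack {
                got_handshake_ack = true;
                continue;
            }
            println!("got {} bytes of MessagePack notification data", data.len());
            break;
        }
    }
    Ok(())
}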
@@ -352,6 +470,69 @@ impl WebSocketUsers {
             push_send_update(ut, send, acting_device_uuid, conn).await;
         }
     }
+
+    pub async fn send_auth_request(
+        &self,
+        user_uuid: &String,
+        auth_request_uuid: &String,
+        acting_device_uuid: &String,
+        conn: &mut DbConn,
+    ) {
+        let data = create_update(
+            vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
+            UpdateType::AuthRequest,
+            Some(acting_device_uuid.to_string()),
+        );
+        self.send_update(user_uuid, &data).await;
+
+        if CONFIG.push_enabled() {
+            push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
+        }
+    }
+
+    pub async fn send_auth_response(
+        &self,
+        user_uuid: &String,
+        auth_response_uuid: &str,
+        approving_device_uuid: String,
+        conn: &mut DbConn,
+    ) {
+        let data = create_update(
+            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
+            UpdateType::AuthRequestResponse,
+            approving_device_uuid.clone().into(),
+        );
+        self.send_update(auth_response_uuid, &data).await;
+
+        if CONFIG.push_enabled() {
+            push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)
+                .await;
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct AnonymousWebSocketSubscriptions {
+    map: Arc<dashmap::DashMap<String, Sender<Message>>>,
+}
+
+impl AnonymousWebSocketSubscriptions {
+    async fn send_update(&self, token: &str, data: &[u8]) {
+        if let Some(sender) = self.map.get(token).map(|v| v.clone()) {
+            if let Err(e) = sender.send(Message::binary(data)).await {
+                error!("Error sending WS update {e}");
+            }
+        }
+    }
+
+    pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
+        let data = create_anonymous_update(
+            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
+            UpdateType::AuthRequestResponse,
+            user_uuid.to_string(),
+        );
+        self.send_update(auth_response_uuid, &data).await;
+    }
 }

 /* Message Structure
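A hedged sketch, not part of the diff, of how a caller might fan out both notification paths when an approving device answers a login request; only the two notify calls mirror the API added above, while the function name and argument values are illustrative.

// Illustrative only: notify the owner's logged-in clients and the anonymous waiter
// that is subscribed under the auth request UUID.
async fn notify_auth_response(
    user_uuid: &String,
    auth_request_uuid: &str,
    approving_device_uuid: String,
    nt: &WebSocketUsers,
    ant: &AnonymousWebSocketSubscriptions,
    conn: &mut DbConn,
) {
    nt.send_auth_response(user_uuid, auth_request_uuid, approving_device_uuid, conn).await;
    ant.send_auth_response(user_uuid, auth_request_uuid).await;
}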
@@ -387,6 +568,24 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui
     serialize(value)
 }

+fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec<u8> {
+    use rmpv::Value as V;
+
+    let value = V::Array(vec![
+        1.into(),
+        V::Map(vec![]),
+        V::Nil,
+        "AuthRequestResponseRecieved".into(),
+        V::Array(vec![V::Map(vec![
+            ("Type".into(), (ut as i32).into()),
+            ("Payload".into(), payload.into()),
+            ("UserId".into(), user_id.into()),
+        ])]),
+    ]);
+
+    serialize(value)
+}
+
 fn create_ping() -> Vec<u8> {
     serialize(Value::Array(vec![6.into()]))
 }
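To make the wire format above easier to inspect, here is a small standalone sketch that builds the same SignalR-style invocation array with rmpv and encodes it. The UUID strings and the Type discriminant value are placeholders, and the final framing done by the crate's serialize() helper is intentionally left out.

// Illustrative only. Cargo dep assumed: rmpv = "1".
use rmpv::Value;

fn main() {
    let payload = vec![(Value::from("Id"), Value::from("some-auth-request-uuid"))];
    let value = Value::Array(vec![
        1.into(),                             // message type 1 = invocation
        Value::Map(vec![]),                   // headers
        Value::Nil,                           // no invocation id
        "AuthRequestResponseRecieved".into(), // target method on the client
        Value::Array(vec![Value::Map(vec![
            ("Type".into(), 16.into()),       // UpdateType discriminant (example value)
            ("Payload".into(), Value::Map(payload)),
            ("UserId".into(), "some-user-uuid".into()),
        ])]),
    ]);

    let mut buf = Vec::new();
    rmpv::encode::write_value(&mut buf, &value).expect("encode");
    println!("{} bytes of MessagePack, first bytes: {:?}", buf.len(), &buf[..buf.len().min(16)]);
}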
@@ -420,6 +619,7 @@ pub enum UpdateType {
 }

 pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
+pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;

 pub fn start_notification_server() -> Arc<WebSocketUsers> {
     let users = Arc::clone(&WS_USERS);
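For the new AnonymousNotify<'_> request guard to resolve, the subscription map has to be placed into Rocket's managed state during startup, alongside the existing WebSocketUsers state. The launch code is outside this diff, so the following is only a sketch of that wiring; the helper name is made up.

// Illustrative only: managed-state wiring assumed to live in the launch code.
use std::sync::Arc;

fn attach_ws_state(rocket: rocket::Rocket<rocket::Build>) -> rocket::Rocket<rocket::Build> {
    rocket
        .manage(start_notification_server())
        .manage(Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS))
}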
Some files were not shown because too many files have changed in this diff.