Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2025-09-10 18:55:57 +03:00
Compare commits
191 Commits
SHA1:
b557c11724, a1204cc935, 1ea511cbfc, 2e6a6fa39f, e7d5c17ff7, a7be8fab9b,
39d4d31080, c28246cf34, d7df0ad79e, 7c8ba0c232, d335187172, f858523d92,
529c39c6c5, b428481ac0, b4b2701905, de66e56b6c, ecfebaf3c7, 0e53f58288,
bc7ceb2ee3, b27e6e30c9, 505b30eec2, 54bfcb8bc3, 035f694d2f, a4ab014ade,
6fedfceaa9, 8e8483481f, d04b94b77d, 247d0706ff, 0e8b410798, fda77afc2a,
d9835f530c, bd91964170, d42b264a93, a4c7fadbf4, 8e2a87fd79, 4233dbf3db,
a2bf8def2a, 8f05a90b96, 9082e7cebb, 55fdee3bf8, 377969ea67, f05398a6b3,
9555ac7bb8, f01ef40a8e, 8e7b27cc36, d230ee087c, f8f14727b9, 753a9e0bae,
f5fb69b64f, 3261534438, 46762d9fde, 6cadb2627a, 0fe93edea6, e9aa5a545e,
9dcc738f85, 84a7c7da5d, ca9234ed86, 27dc67fadd, 2ad33ec97f, e1a8df96db,
e42a37c6c1, 129b835ac7, 2d98aa3045, 93636eb3c3, 1e42755187, ce8efcc48f,
79ce5b49bc, 7c3cad197c, 000c606029, 29144b2ce0, ea04b6f151, 3427217686,
a1fbd6d729, 2cbfe6fa5b, d86c4f2c23, 6d73f30b4f, d0c22b9fc9, d6b97090fa,
94b077cb2d, bb2412d033, b9bdc9b8e2, 897bdf8343, 569add453d, 77cd5b5954,
4438da39f9, 0b2383ab56, ad1d65bdf8, 3b283c289e, 4b9384cb2b, 0f39d96518,
edf7484a70, 8b66e34415, 1d00e34bbb, 1b801406d6, 5e46a43306, 5c77431c2d,
2775c6ce8a, 890e668071, 596c167312, ae3a153bdb, 2c36993792, d672ad3f76,
a641b48884, 98b2178c7d, 76a3f0f531, c5665e7b77, cbdcf8ef9f, 3337594d60,
2daa8be1f1, eccb3ab947, 3246251f29, 8ab200224e, 34e00e1478, 0fdda3bc2f,
48836501bf, f863ffb89a, 03c6ed2e07, efc6eb0073, cec1e87679, 512b3b9b7c,
93da5091e6, 915496c103, ecb31c85d6, d722328f05, cb4b683dcd, 6eaf131922,
8933ac2ee7, 6822e445bb, 18fbc1ccf6, 4861f6decc, b435ee49ad, 193f86e43e,
66a7baa67c, 18d66474e0, ff8db4fd78, b2f9af718e, 198fd2fc1d, ec8a9c82df,
ef5e0bd4e5, 30b408eaa9, e205e3b7db, ca1a9e26d8, f3a1385aee, 008a2cf298,
f0c9a7fbc3, 9162b13123, 480bf9b0c1, f96c5e8a1e, 3d4be24902, bf41d74501,
01e33a4919, bc26bfa589, ccc51e7580, 99a59bc4f3, a77482575a, bbd630f1ee,
d18b793c71, d3a1d875d5, d6e0ace192, 60cbfa59bf, 5ab7010c37, ad2cfd8b97,
32543c46da, 66bff73ebf, 83d5432cbf, f579a4154c, f5a19c5f8b, aa9bc1f785,
f162e85e44, 33ef70c192, 3d2df6ce11, 6cdcb3b297, d1af468700, ae1c53f4e5,
bc57c4b193, 61ae4c9cf5, 8d7b3db33d, e9ec3741ae, dacd50f3f1, 9412112639,
aaeae16983, d892880dd2, 4395e8e888, 3dbfc484a5, 4ec2507073, ab65d7989b,
8707728cdb, 631d022e17, 211f4492fa, 61f9081827, a8e5384c4a
554  .env.template
@@ -10,22 +10,63 @@
 ## variable ENV_FILE can be set to the location of this file prior to starting
 ## Vaultwarden.
 
+####################
+### Data folders ###
+####################
+
 ## Main data folder
 # DATA_FOLDER=data
 
+## Individual folders, these override %DATA_FOLDER%
+# RSA_KEY_FILENAME=data/rsa_key
+# ICON_CACHE_FOLDER=data/icon_cache
+# ATTACHMENTS_FOLDER=data/attachments
+# SENDS_FOLDER=data/sends
+# TMP_FOLDER=data/tmp
+
+## Templates data folder, by default uses embedded templates
+## Check source code to see the format
+# TEMPLATES_FOLDER=data/templates
+## Automatically reload the templates for every request, slow, use only for development
+# RELOAD_TEMPLATES=false
+
+## Web vault settings
+# WEB_VAULT_FOLDER=web-vault/
+# WEB_VAULT_ENABLED=true
+
+#########################
+### Database settings ###
+#########################
+
 ## Database URL
 ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
 # DATABASE_URL=data/db.sqlite3
 ## When using MySQL, specify an appropriate connection URI.
-## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
+## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
 # DATABASE_URL=mysql://user:password@host[:port]/database_name
 ## When using PostgreSQL, specify an appropriate connection URI (recommended)
 ## or keyword/value connection string.
 ## Details:
-## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
+## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
 ## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
 # DATABASE_URL=postgresql://user:password@host[:port]/database_name
 
+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents Vaultwarden from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
+
+## Database timeout
+## Timeout when acquiring database connection
+# DATABASE_TIMEOUT=30
+
 ## Database max connections
 ## Define the size of the connection pool used for connecting to the database.
 # DATABASE_MAX_CONNS=10
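Note: for the keyword/value PostgreSQL connection string mentioned in this hunk, a minimal sketch following the libpq format (host, credentials, and database name are placeholders):

    # DATABASE_URL='host=localhost port=5432 user=vaultwarden password=secret dbname=vaultwarden'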
@@ -39,70 +80,36 @@
 ## - PostgreSQL: ""
 # DATABASE_CONN_INIT=""
 
-## Individual folders, these override %DATA_FOLDER%
-# RSA_KEY_FILENAME=data/rsa_key
-# ICON_CACHE_FOLDER=data/icon_cache
-# ATTACHMENTS_FOLDER=data/attachments
-# SENDS_FOLDER=data/sends
-# TMP_FOLDER=data/tmp
+#################
+### WebSocket ###
+#################
 
-## Templates data folder, by default uses embedded templates
-## Check source code to see the format
-# TEMPLATES_FOLDER=/path/to/templates
-## Automatically reload the templates for every request, slow, use only for development
-# RELOAD_TEMPLATES=false
+## Enable websocket notifications
+# ENABLE_WEBSOCKET=true
 
-## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
-## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Real-IP
+##########################
+### Push notifications ###
+##########################
 
-## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
-# ICON_CACHE_TTL=2592000
-## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
-# ICON_CACHE_NEGTTL=259200
-
-## Web vault settings
-# WEB_VAULT_FOLDER=web-vault/
-# WEB_VAULT_ENABLED=true
-
-## Enables websocket notifications
-# WEBSOCKET_ENABLED=false
-
-## Controls the WebSocket server address and port
-# WEBSOCKET_ADDRESS=0.0.0.0
-# WEBSOCKET_PORT=3012
-
 ## Enables push notifications (requires key and id from https://bitwarden.com/host)
-# PUSH_ENABLED=true
+## Details about mobile client push notification:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
+# PUSH_ENABLED=false
 # PUSH_INSTALLATION_ID=CHANGEME
 # PUSH_INSTALLATION_KEY=CHANGEME
-## Don't change this unless you know what you're doing.
-# PUSH_RELAY_BASE_URI=https://push.bitwarden.com
 
-## Controls whether users are allowed to create Bitwarden Sends.
-## This setting applies globally to all users.
-## To control this on a per-org basis instead, use the "Disable Send" org policy.
-# SENDS_ALLOWED=true
+# WARNING: Do not modify the following settings unless you fully understand their implications!
+# Default Push Relay and Identity URIs
+# PUSH_RELAY_URI=https://push.bitwarden.com
+# PUSH_IDENTITY_URI=https://identity.bitwarden.com
+# European Union Data Region Settings
+# If you have selected "European Union" as your data region, use the following URIs instead.
+# PUSH_RELAY_URI=https://api.bitwarden.eu
+# PUSH_IDENTITY_URI=https://identity.bitwarden.eu
 
-## Controls whether users can enable emergency access to their accounts.
-## This setting applies globally to all users.
-# EMERGENCY_ACCESS_ALLOWED=true
-
-## Controls whether event logging is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
-# ORG_EVENTS_ENABLED=false
-
-## Number of days to retain events stored in the database.
-## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
-# EVENTS_DAYS_RETAIN=
-
-## BETA FEATURE: Groups
-## Controls whether group support is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default because this is a beta feature, it contains known issues!
-## KNOW WHAT YOU ARE DOING!
-# ORG_GROUPS_ENABLED=false
+#####################
+### Schedule jobs ###
+#####################
 
 ## Job scheduler settings
 ##
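Note: the removal of the WEBSOCKET_ADDRESS/WEBSOCKET_PORT pair suggests notification traffic now shares the main web port instead of a dedicated port 3012, so a separate proxy rule for it should no longer be needed. A minimal compose-style sketch of the new setting (service name and image tag are assumptions):

    services:
      vaultwarden:
        image: vaultwarden/server:latest   # tag assumed
        environment:
          ENABLE_WEBSOCKET: "true"         # replaces WEBSOCKET_ENABLED/_ADDRESS/_PORT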
@@ -143,60 +150,73 @@
 ## Cron schedule of the job that cleans old events from the event table.
 ## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
 # EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
-## Enable extended logging, which shows timestamps and targets in the logs
-# EXTENDED_LOGGING=true
-
-## Timestamp format used in extended logging.
-## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
-# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
-
-## Logging to file
-# LOG_FILE=/path/to/log
-
-## Logging to Syslog
-## This requires extended logging
-# USE_SYSLOG=false
-
-## Log level
-## Change the verbosity of the log output
-## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
-# LOG_LEVEL=Info
-
-## Enable WAL for the DB
-## Set to false to avoid enabling WAL during startup.
-## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents Vaultwarden from automatically enabling it on start.
-## Please read project wiki page about this setting first before changing the value as it can
-## cause performance degradation or might render the service unable to start.
-# ENABLE_DB_WAL=true
-
-## Database connection retries
-## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
-# DB_CONNECTION_RETRIES=15
-
-## Icon service
-## The predefined icon services are: internal, bitwarden, duckduckgo, google.
-## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
-## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=
 ##
-## `internal` refers to Vaultwarden's built-in icon fetching implementation.
-## If an external service is set, an icon request to Vaultwarden will return an HTTP
-## redirect to the corresponding icon at the external service. An external service may
-## be useful if your Vaultwarden instance has no external network connectivity, or if
-## you are concerned that someone may probe your instance to try to detect whether icons
-## for certain sites have been cached.
-# ICON_SERVICE=internal
+## Cron schedule of the job that cleans old auth requests from the auth request.
+## Defaults to every minute. Set blank to disable this job.
+# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
+##
+## Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
+## Defaults to every minute. Set blank to disable this job.
+# DUO_CONTEXT_PURGE_SCHEDULE="30 * * * * *"
 
-## Icon redirect code
-## The HTTP status code to use for redirects to an external icon service.
-## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
-## Temporary redirects are useful while testing different icon services, but once a service
-## has been decided on, consider using permanent redirects for cacheability. The legacy codes
-## are currently better supported by the Bitwarden clients.
-# ICON_REDIRECT_CODE=302
+########################
+### General settings ###
+########################
+
+## Domain settings
+## The domain must match the address from where you access the server
+## It's recommended to configure this value, otherwise certain functionality might not work,
+## like attachment downloads, email links and U2F.
+## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
+## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
+## Details:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
+## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
+## For development
+# DOMAIN=http://localhost
+## For public server
+# DOMAIN=https://vw.domain.tld
+## For public server (URL with port number)
+# DOMAIN=https://vw.domain.tld:8443
+## For public server (URL with path)
+# DOMAIN=https://domain.tld/vw
+
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
+
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
+# USER_ATTACHMENT_LIMIT=
+## Per-user send storage limit (KB)
+## Max kilobytes of send storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further sends.
+# USER_SEND_LIMIT=
+
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3
 
 ## Disable icon downloading
 ## Set to true to disable icon downloading in the internal icon service.
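Note: the schedule strings in this hunk are six-field cron expressions with a leading seconds field (sec min hour day month weekday), so they read as follows (interpretation, not part of the diff):

    # "0 10 0 * * *"  -> daily at 00:10:00
    # "30 * * * * *"  -> once per minute, at second 30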
@@ -205,38 +225,6 @@
 ## will be deleted eventually, but won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false
 
-## Icon download timeout
-## Configure the timeout value when downloading the favicons.
-## The default is 10 seconds, but this could be to low on slower network connections
-# ICON_DOWNLOAD_TIMEOUT=10
-
-## Icon blacklist Regex
-## Any domains or IPs that match this regex won't be fetched by the icon service.
-## Useful to hide other servers in the local network. Check the WIKI for more details
-## NOTE: Always enclose this regex withing single quotes!
-# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
-
-## Any IP which is not defined as a global IP will be blacklisted.
-## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-# ICON_BLACKLIST_NON_GLOBAL_IPS=true
-
-## Disable 2FA remember
-## Enabling this would force the users to use a second factor to login every time.
-## Note that the checkbox would still be present, but ignored.
-# DISABLE_2FA_REMEMBER=false
-
-## Maximum attempts before an email token is reset and a new email will need to be sent.
-# EMAIL_ATTEMPTS_LIMIT=3
-
-## Token expiration time
-## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
-# EMAIL_EXPIRATION_TIME=600
-
-## Email token size
-## Number of digits in an email 2FA token (min: 6, max: 255).
-## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
-# EMAIL_TOKEN_SIZE=6
-
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true
 
@@ -258,6 +246,11 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
 
+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
 ## Controls which users can create new orgs.
 ## Blank or 'all' means all users can create orgs (this is the default):
 # ORG_CREATION_USERS=
@@ -266,6 +259,123 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
+## Invitations org admins to invite users, even when signups are disabled
+# INVITATIONS_ALLOWED=true
+## Name shown in the invitation emails that don't come from a specific organization
+# INVITATION_ORG_NAME=Vaultwarden
+
+## The number of hours after which an organization invite token, emergency access invite token,
+## email verification token and deletion request token will expire (must be at least 1)
+# INVITATION_EXPIRATION_HOURS=120
+
+## Controls whether users can enable emergency access to their accounts.
+## This setting applies globally to all users.
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
+
+## Number of server-side passwords hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=600000
+
+## Controls whether users can set password hints. This setting applies globally to all users.
+# PASSWORD_HINTS_ALLOWED=true
+
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false
+
+#########################
+### Advanced settings ###
+#########################
+
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
+## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
+# IP_HEADER=X-Real-IP
+
+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
+## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
+## Default: 2592000 (30 days)
+# ICON_CACHE_TTL=2592000
+## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
+## Default: 2592000 (3 days)
+# ICON_CACHE_NEGTTL=259200
+
+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be to low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Block HTTP domains/IPs by Regex
+## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+## NOTE: Always enclose this regex withing single quotes!
+# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
+
+## Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
+
+## Client Settings
+## Enable experimental feature flags for clients.
+## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
+##
+## The following flags are available:
+## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
+## - "autofill-v2": Use the new autofill implementation.
+## - "browser-fileless-import": Directly import credentials from other providers without a file.
+## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## Enable extended logging, which shows timestamps and targets in the logs
+# EXTENDED_LOGGING=true
+
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
+
+## Logging to Syslog
+## This requires extended logging
+# USE_SYSLOG=false
+
+## Logging to file
+# LOG_FILE=/path/to/log
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## Setting it to "trace" or "debug" would also show logs for mounted routes and static file, websocket and alive requests
+## For a specific module append a comma separated `path::to::module=log_level`
+## For example, to only see debug logs for icons use: LOG_LEVEL="info,vaultwarden::api::icons=debug"
+# LOG_LEVEL=info
+
 ## Token for the admin interface, preferably an Argon2 PCH string
 ## Vaultwarden has a built-in generator by calling `vaultwarden hash`
 ## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
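Note: the trailing context mentions the built-in generator for ADMIN_TOKEN; a sketch of invoking it, assuming the default Docker image layout (container name and binary path are assumptions):

    vaultwarden hash
    # or inside a running container:
    docker exec -it vaultwarden /vaultwarden hash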
@@ -281,54 +391,13 @@
 ## meant to be used with the use of a separate auth layer in front
 # DISABLE_ADMIN_TOKEN=false
 
-## Invitations org admins to invite users, even when signups are disabled
-# INVITATIONS_ALLOWED=true
-## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Vaultwarden
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3
 
-## The number of hours after which an organization invite token, emergency access invite token,
-## email verification token and deletion request token will expire (must be at least 1)
-# INVITATION_EXPIRATION_HOURS=120
-
-## Per-organization attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per organization.
-## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
-# ORG_ATTACHMENT_LIMIT=
-## Per-user attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per user.
-## When this limit is reached, the user will not be allowed to upload further attachments.
-# USER_ATTACHMENT_LIMIT=
-
-## Number of days to wait before auto-deleting a trashed item.
-## If unset (the default), trashed items are not auto-deleted.
-## This setting applies globally, so make sure to inform all users of any changes to this setting.
-# TRASH_AUTO_DELETE_DAYS=
-
-## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
-## resulting in an email notification. An incomplete 2FA login is one where the correct
-## master password was provided but the required 2FA step was not completed, which
-## potentially indicates a master password compromise. Set to 0 to disable this check.
-## This setting applies globally to all users.
-# INCOMPLETE_2FA_TIME_LIMIT=3
-
-## Number of server-side passwords hashing iterations for the password hash.
-## The default for new users. If changed, it will be updated during login for existing users.
-# PASSWORD_ITERATIONS=350000
-
-## Controls whether users can set password hints. This setting applies globally to all users.
-# PASSWORD_HINTS_ALLOWED=true
-
-## Controls whether a password hint should be shown directly in the web page if
-## SMTP service is not configured. Not recommended for publicly-accessible instances
-## as this provides unauthenticated access to potentially sensitive data.
-# SHOW_PASSWORD_HINT=false
-
-## Domain settings
-## The domain must match the address from where you access the server
-## It's recommended to configure this value, otherwise certain functionality might not work,
-## like attachment downloads, email links and U2F.
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://vw.domain.tld:8443
+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
 
 ## Allowed iframe ancestors (Know the risks!)
 ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
@@ -343,13 +412,22 @@
 ## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
 # LOGIN_RATELIMIT_MAX_BURST=10
 
-## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
-# ADMIN_RATELIMIT_SECONDS=300
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
-# ADMIN_RATELIMIT_MAX_BURST=3
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false
 
-## Set the lifetime of admin sessions to this value (in minutes).
-# ADMIN_SESSION_LIFETIME=20
+## Increase secure note size limit (Know the risks!)
+## Sets the secure note size limit to 100_000 instead of the default 10_000.
+## WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
+## KNOW WHAT YOU ARE DOING!
+# INCREASE_NOTE_SIZE_LIMIT=false
+
+########################
+### MFA/2FA settings ###
+########################
 
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
@@ -360,16 +438,46 @@
 # YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
 
 ## Duo Settings
-## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
+## You need to configure the DUO_IKEY, DUO_SKEY, and DUO_HOST options to enable global Duo support.
+## Otherwise users will need to configure it themselves.
 ## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
 ## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
 ## Then set the following options, based on the values obtained from the last step:
-# DUO_IKEY=<Integration Key>
-# DUO_SKEY=<Secret Key>
+# DUO_IKEY=<Client ID>
+# DUO_SKEY=<Client Secret>
 # DUO_HOST=<API Hostname>
 ## After that, you should be able to follow the rest of the guide linked above,
 ## ignoring the fields that ask for the values that you already configured beforehand.
+##
+## If you want to attempt to use Duo's 'Traditional Prompt' (deprecated, iframe based) set DUO_USE_IFRAME to 'true'.
+## Duo no longer supports this, but it still works for some integrations.
+## If you aren't sure, leave this alone.
+# DUO_USE_IFRAME=false
+
+## Email 2FA settings
+## Email token size
+## Number of digits in an email 2FA token (min: 6, max: 255).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+##
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+##
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+##
+## Setup email 2FA regardless of any organization policy
+# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
+## Automatically setup email 2FA as fallback provider when needed
+# EMAIL_2FA_AUTO_FALLBACK=false
 
+## Other MFA/2FA settings
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+##
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
@@ -382,12 +490,9 @@
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
 # AUTHENTICATOR_DISABLE_TIME_DRIFT=false
 
-## Rocket specific settings
-## See https://rocket.rs/v0.4/guide/configuration/ for more details.
-# ROCKET_ADDRESS=0.0.0.0
-# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
-# ROCKET_WORKERS=10
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+###########################
+### SMTP Email settings ###
+###########################
 
 ## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
@@ -395,12 +500,19 @@
 # SMTP_HOST=smtp.domain.tld
 # SMTP_FROM=vaultwarden@domain.tld
 # SMTP_FROM_NAME=Vaultwarden
-# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
-# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
 
+## Choose the type of secure connection for SMTP. The default is "starttls".
+## The available options are:
+## - "starttls": The default port is 587.
+## - "force_tls": The default port is 465.
+## - "off": The default port is 25.
+## Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
+# SMTP_SECURITY=starttls
+# SMTP_PORT=587
+
 # Whether to send mail via the `sendmail` command
 # USE_SENDMAIL=false
 # Which sendmail command to use. The one found in the $PATH is used if not specified.
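Note: a minimal working mail setup combining the relocated settings might look like this (host, credentials, and the force_tls choice are placeholders):

    SMTP_HOST=smtp.example.com
    SMTP_FROM=vaultwarden@example.com
    SMTP_SECURITY=force_tls
    SMTP_PORT=465
    SMTP_USERNAME=mailer
    SMTP_PASSWORD=changeme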
@@ -409,7 +521,7 @@
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
-# SMTP_AUTH_MECHANISM="Plain"
+# SMTP_AUTH_MECHANISM=
 
 ## Server name sent during the SMTP HELO
 ## By default this value should be is on the machine's hostname,
@@ -417,30 +529,34 @@
 # HELO_NAME=
 
 ## Embed images as email attachments
-# SMTP_EMBED_IMAGES=false
+# SMTP_EMBED_IMAGES=true
 
 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
 # SMTP_DEBUG=false
 
-## Accept Invalid Hostnames
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-# SMTP_ACCEPT_INVALID_HOSTNAMES=false
-
 ## Accept Invalid Certificates
 ## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
 ## Only use this as a last resort if you are not able to use a valid certificate.
 ## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
 # SMTP_ACCEPT_INVALID_CERTS=false
 
-## Require new device emails. When a user logs in an email is required to be sent.
-## If sending the email fails the login attempt will fail!!
-# REQUIRE_DEVICE_EMAIL=false
-
-## HIBP Api Key
-## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
-# HIBP_API_KEY=
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+#######################
+### Rocket settings ###
+#######################
+
+## Rocket specific settings
+## See https://rocket.rs/v0.5/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+## The default port is 8000, unless running in a Docker container, in which case it is 80.
+# ROCKET_PORT=8000
+# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
 # vim: syntax=ini
3  .github/CODEOWNERS (vendored, new file)
@@ -0,0 +1,3 @@
+/.github @dani-garcia @BlackDex
+/.github/CODEOWNERS @dani-garcia @BlackDex
+/.github/workflows/** @dani-garcia @BlackDex
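Note: each CODEOWNERS line pairs a path pattern with one or more owners, and the last matching pattern wins, which is why the narrower workflow rule restates the same owners. A generic sketch with hypothetical owners:

    /docs/  @doc-team
    *.rs    @rust-reviewers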
75  .github/workflows/build.yml (vendored)
@@ -8,9 +8,11 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -18,19 +20,20 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
      - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"
 
 jobs:
   build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
-      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
     strategy:
       fail-fast: false
       matrix:
@@ -43,13 +46,13 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
       # End Checkout the repo
 
 
       # Install dependencies
       - name: "Install dependencies Ubuntu"
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies
 
 
@@ -59,7 +62,7 @@ jobs:
         shell: bash
         run: |
           if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
-            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+            RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
           elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
             RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
           else
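Note: the new grep pulls the channel value out of rust-toolchain.toml using PCRE look-around (\K discards the matched prefix and (?=") stops before the closing quote, so -o prints only the version string). The file's typical layout, with a placeholder version number:

    [toolchain]
    channel = "1.79.0"
    components = [ "rustfmt", "clippy" ]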
@@ -71,7 +74,7 @@ jobs:
 
       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -81,17 +84,19 @@ jobs:
 
       # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used
 
-      # Enable Rust Caching
-      - uses: Swatinem/rust-cache@2656b87321093db1cb55fbd73183d195214fdfd1 # v2.5.0
-      # End Enable Rust Caching
+      # Set the current matrix toolchain version as default
+      - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+        run: |
+          # Remove the rust-toolchain.toml
+          rm rust-toolchain.toml
+          # Set the default
+          rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
 
       # Show environment
       - name: "Show environment"
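Note: a rust-toolchain.toml in the working directory takes precedence over `rustup default`, which is presumably why the new step deletes the file before switching the default toolchain.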
@@ -100,47 +105,55 @@ jobs:
           cargo -vV
       # End Show environment
 
+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+        with:
+          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
+          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
+          # Only update when really needed! Use a <year>.<month>[.<inc>] format.
+          prefix-key: "v2023.07-rust"
+      # End Enable Rust Caching
+
-      # Run cargo tests (In release mode to speed up future builds)
+      # Run cargo tests
       # First test all features together, afterwards test them separately.
       - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
         id: test_sqlite_mysql_postgresql_mimalloc
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
+          cargo test --features sqlite,mysql,postgresql,enable_mimalloc
 
       - name: "test features: sqlite,mysql,postgresql"
         id: test_sqlite_mysql_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql
+          cargo test --features sqlite,mysql,postgresql
 
       - name: "test features: sqlite"
         id: test_sqlite
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite
+          cargo test --features sqlite
 
       - name: "test features: mysql"
         id: test_mysql
         if: $${{ always() }}
         run: |
-          cargo test --release --features mysql
+          cargo test --features mysql
 
       - name: "test features: postgresql"
         id: test_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features postgresql
+          cargo test --features postgresql
       # End Run cargo tests
 
 
-      # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+      # Run cargo clippy, and fail on warnings
       - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
         id: clippy
         if: ${{ always() && matrix.channel == 'rust-toolchain' }}
         run: |
-          cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+          cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
       # End Run cargo clippy
 
 
@@ -182,21 +195,3 @@ jobs:
         run: |
           echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
-
-
-      # Build the binary to upload to the artifacts
-      - name: "build features: sqlite,mysql,postgresql"
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        run: |
-          cargo build --release --features sqlite,mysql,postgresql
-      # End Build the binary
-
-
-      # Upload artifact to Github Actions
-      - name: "Upload artifact"
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          name: vaultwarden
-          path: target/release/vaultwarden
-      # End Upload artifact to Github Actions
.github/workflows/hadolint.yml (vendored, 7 changes)

@@ -8,15 +8,14 @@ on: [
 jobs:
   hadolint:
     name: Validate Dockerfile syntax
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 30
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
       # End Checkout the repo

-
       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
@@ -30,5 +29,5 @@ jobs:
       # Test Dockerfiles
       - name: Run hadolint
         shell: bash
-        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
+        run: hadolint docker/Dockerfile.{debian,alpine}
      # End Test Dockerfiles
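With the Dockerfiles now generated at fixed paths, the lint step can name them directly instead of reconstructing the list with `git ls-files`. The same check runs locally, assuming hadolint is installed:

```bash
# Lint both generated Dockerfiles exactly as the workflow does.
hadolint docker/Dockerfile.debian docker/Dockerfile.alpine
```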
.github/workflows/release.yml (vendored, 283 changes)

@@ -2,21 +2,10 @@ name: Release

 on:
   push:
-    paths:
-      - ".github/workflows/release.yml"
-      - "src/**"
-      - "migrations/**"
-      - "hooks/**"
-      - "docker/**"
-      - "Cargo.*"
-      - "build.rs"
-      - "diesel.toml"
-      - "rust-toolchain"
-
-    branches: # Only on paths above
+    branches:
       - main

-    tags: # Always, regardless of paths above
+    tags:
       - '*'

 jobs:
@@ -24,34 +13,31 @@ jobs:
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
       - name: Skip Duplicates Actions
         id: skip_check
-        uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
+        uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
         with:
           cancel_others: 'true'
         # Only run this when not creating a tag
-        if: ${{ startsWith(github.ref, 'refs/heads/') }}
+        if: ${{ github.ref_type == 'branch' }}

   docker-build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     needs: skip_check
-    # Start a local docker registry to be used to generate multi-arch images.
+    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
+    # Start a local docker registry to extract the final Alpine static build binaries
     services:
       registry:
         image: registry:2
         ports:
           - 5000:5000
     env:
-      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
-      # build performance and the ability to copy extended file attributes
-      # (e.g., for executable capabilities) across build phases.
-      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
       # The *_REPO variables need to be configured as repository variables
@@ -65,7 +51,6 @@ jobs:
       # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
       # Check for Quay.io credentials in secrets
       HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
-    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
         base_image: ["debian","alpine"]
@@ -73,163 +58,195 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
         with:
           fetch-depth: 0

-      # Determine Docker Tag
-      - name: Init Variables
-        id: vars
+      - name: Initialize QEMU binfmt support
+        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
+        with:
+          platforms: "arm64,arm"
+
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 3, the default of 4 breaks GitHub Actions and causes OOMKills
+        with:
+          buildkitd-config-inline: |
+            [worker.oci]
+              max-parallelism = 3
+          driver-opts: |
+            network=host
+
+      # Determine Base Tags and Source Version
+      - name: Determine Base Tags and Source Version
         shell: bash
         run: |
-          # Check which main tag we are going to build determined by github.ref
-          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
-            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
-          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
-            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
           fi
-      # End Determine Docker Tag
+
+          # Get the Source Version for this release
+          GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
+          if [[ -n "${GIT_EXACT_TAG}" ]]; then
+            echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}"
+          else
+            GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+            echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}"
+          fi
+      # End Determine Base Tags

       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}

+      - name: Add registry for DockerHub
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}

+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to Quay.io
       - name: Login to Quay.io
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
         with:
           registry: quay.io
           username: ${{ secrets.QUAY_USERNAME }}
           password: ${{ secrets.QUAY_TOKEN }}
         if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}

+      - name: Add registry for Quay.io
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
+
-      # Debian
-
-      # Docker Hub
-      - name: Build Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      # Alpine
-
-      # Docker Hub
-      - name: Build Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+      - name: Configure build cache from/to
+        shell: bash
+        run: |
+          #
+          # Check if there is a GitHub Container Registry Login and use it for caching
+          if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
+            echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
+          else
+            echo "BAKE_CACHE_FROM="
+            echo "BAKE_CACHE_TO="
+          fi
+          #
+
+      - name: Add localhost registry
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
+
+      - name: Bake ${{ matrix.base_image }} containers
+        uses: docker/bake-action@a4d7f0b5b91c14a296d792d4ec53a9db17f02e67 # v5.5.0
+        env:
+          BASE_TAGS: "${{ env.BASE_TAGS }}"
+          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
+          SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
+          SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
+          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
+        with:
+          pull: true
+          push: true
+          files: docker/docker-bake.hcl
+          targets: "${{ matrix.base_image }}-multi"
+          set: |
+            *.cache-from=${{ env.BAKE_CACHE_FROM }}
+            *.cache-to=${{ env.BAKE_CACHE_TO }}
+
+      # Extract the Alpine binaries from the containers
+      - name: Extract binaries
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            EXTRACT_TAG="latest"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            EXTRACT_TAG="testing"
+          fi
+
+          # After each extraction the image is removed.
+          # This is needed because using different platforms doesn't trigger a new pull/download
+
+          # Extract amd64 binary
+          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp amd64:/vaultwarden vaultwarden-amd64
+          docker rm --force amd64
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract arm64 binary
+          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp arm64:/vaultwarden vaultwarden-arm64
+          docker rm --force arm64
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv7 binary
+          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv7:/vaultwarden vaultwarden-armv7
+          docker rm --force armv7
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv6 binary
+          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv6:/vaultwarden vaultwarden-armv6
+          docker rm --force armv6
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+      # Upload artifacts to Github Actions
+      - name: "Upload amd64 artifact"
+        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
+          path: vaultwarden-amd64
+
+      - name: "Upload arm64 artifact"
+        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
+          path: vaultwarden-arm64
+
+      - name: "Upload armv7 artifact"
+        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
+          path: vaultwarden-armv7
+
+      - name: "Upload armv6 artifact"
+        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
+          path: vaultwarden-armv6
+      # End Upload artifacts to Github Actions
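The new extract step relies on the `docker create` + `docker cp` pattern: a container is created but never started, the static binary is copied out, and both container and image are removed so the next `--platform` actually pulls fresh layers. The same pattern works against any published Alpine tag; a sketch (the tag and platform are examples):

```bash
# Pull a single static binary out of an image without running it.
docker create --name vw --platform=linux/arm64 docker.io/vaultwarden/server:latest-alpine
docker cp vw:/vaultwarden ./vaultwarden-arm64
docker rm --force vw
```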
.github/workflows/releasecache-cleanup.yml (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
+on:
+  workflow_dispatch:
+    inputs:
+      manual_trigger:
+        description: "Manual trigger buildcache cleanup"
+        required: false
+        default: ""
+
+  schedule:
+    - cron: '0 1 * * FRI'
+
+name: Cleanup
+jobs:
+  releasecache-cleanup:
+    name: Releasecache Cleanup
+    runs-on: ubuntu-22.04
+    continue-on-error: true
+    timeout-minutes: 30
+    steps:
+      - name: Delete vaultwarden-buildcache containers
+        uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0
+        with:
+          package-name: 'vaultwarden-buildcache'
+          package-type: 'container'
+          min-versions-to-keep: 0
+          delete-only-untagged-versions: 'false'
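Besides the Friday cron, the `workflow_dispatch` trigger allows a manual run. With the GitHub CLI installed, that could look like this (the repository slug and input value are illustrative):

```bash
# Manually trigger the buildcache cleanup workflow.
gh workflow run releasecache-cleanup.yml --repo dani-garcia/vaultwarden -f manual_trigger=manual
```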
.github/workflows/trivy.yml (vendored, new file, 42 lines)

@@ -0,0 +1,42 @@
+name: trivy
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - '*'
+  pull_request:
+    branches: [ "main" ]
+  schedule:
+    - cron: '00 12 * * *'
+
+permissions:
+  contents: read
+
+jobs:
+  trivy-scan:
+    name: Check
+    runs-on: ubuntu-22.04
+    timeout-minutes: 30
+    permissions:
+      contents: read
+      security-events: write
+      actions: read
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
+
+      - name: Run Trivy vulnerability scanner
+        uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0
+        with:
+          scan-type: repo
+          ignore-unfixed: true
+          format: sarif
+          output: trivy-results.sarif
+          severity: CRITICAL,HIGH
+
+      - name: Upload Trivy scan results to GitHub Security tab
+        uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.10
+        with:
+          sarif_file: 'trivy-results.sarif'
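The action's inputs map directly onto Trivy's CLI flags, so a comparable scan can be run from a local checkout. A sketch assuming Trivy is installed (`trivy fs` scans the working tree, much as `scan-type: repo` scans the checked-out repository):

```bash
# Report only CRITICAL/HIGH findings that already have a fix available.
trivy fs --ignore-unfixed --severity CRITICAL,HIGH \
    --format sarif --output trivy-results.sarif .
```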
@@ -1,10 +1,12 @@
 ignored:
+  # To prevent issues and make clear some images only work on linux/amd64, we ignore this
+  - DL3029
   # disable explicit version for apt install
   - DL3008
   # disable explicit version for apk install
   - DL3018
-  # disable check for consecutive `RUN` instructions
-  - DL3059
+  # Ignore shellcheck info message
+  - SC1091
 trustedRegistries:
   - docker.io
   - ghcr.io
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.6.0
    hooks:
      - id: check-yaml
      - id: check-json
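Revision bumps like this don't have to be done by hand; pre-commit can rewrite every `rev` in the config to the latest tag:

```bash
# Bump all hook revisions in .pre-commit-config.yaml, then re-check the tree.
pre-commit autoupdate
pre-commit run --all-files
```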
Cargo.lock (generated, 2450 changes) - file diff suppressed because it is too large.
Cargo.toml (183 changes)

@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.68.2"
+rust-version = "1.79.0"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -36,75 +36,74 @@ unstable = []

 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.1.0"
+syslog = "6.1.1"

 [dependencies]
 # Logging
-log = "0.4.19"
-fern = { version = "0.6.2", features = ["syslog-6"] }
-tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
+log = "0.4.22"
+fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
+tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

 # A `dotenv` implementation for Rust
 dotenvy = { version = "0.15.7", default-features = false }

 # Lazy initialization
-once_cell = "1.18.0"
+once_cell = "1.19.0"

 # Numerical libraries
-num-traits = "0.2.15"
-num-derive = "0.4.0"
+num-traits = "0.2.19"
+num-derive = "0.4.2"
+bigdecimal = "0.4.5"

 # Web framework
-rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
-# rocket_ws = { version ="0.1.0-rc.3" }
-rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch
+rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
+rocket_ws = { version ="0.1.1" }

 # WebSockets libraries
-tokio-tungstenite = "0.19.0"
-rmpv = "1.0.0" # MessagePack library
+rmpv = "1.3.0" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.4.0"
+dashmap = "6.0.1"

 # Async futures
-futures = "0.3.28"
-tokio = { version = "1.29.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+futures = "0.3.30"
+tokio = { version = "1.39.2", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.166", features = ["derive"] }
-serde_json = "1.0.99"
+serde = { version = "1.0.204", features = ["derive"] }
+serde_json = "1.0.122"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
-diesel_migrations = "2.1.0"
+diesel = { version = "2.2.2", features = ["chrono", "r2d2", "numeric"] }
+diesel_migrations = "2.2.0"
 diesel_logger = { version = "0.3.0", optional = true }

 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.29.0", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
-ring = "0.16.20"
+ring = "0.17.8"

 # UUID generation
-uuid = { version = "1.4.0", features = ["v4"] }
+uuid = { version = "1.10.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.3"
-time = "0.3.22"
+chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.9.0"
+time = "0.3.36"

 # Job scheduler
-job_scheduler_ng = "2.0.4"
+job_scheduler_ng = "2.0.5"

 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.4.0"
+data-encoding = "2.6.0"

 # JWT library
-jsonwebtoken = "8.3.0"
+jsonwebtoken = "9.3.0"

 # TOTP library
-totp-lite = "2.0.0"
+totp-lite = "2.0.1"

 # Yubico Library
 yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
@@ -113,71 +112,135 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 webauthn-rs = "0.3.2"

 # Handling of URL's for WebAuthn and favicons
-url = "2.4.0"
+url = "2.5.2"

 # Email libraries
-lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
-percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
-email_address = "0.2.4"
+lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
+email_address = "0.2.9"

 # HTML Template library
-handlebars = { version = "4.3.7", features = ["dir_source"] }
+handlebars = { version = "6.0.0", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.18", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
+reqwest = { version = "0.12.5", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.1"

 # Favicon extraction libraries
-html5gum = "0.5.3"
-regex = { version = "1.8.4", features = ["std", "perf", "unicode-perl"], default-features = false }
-data-url = "0.3.0"
-bytes = "1.4.0"
+html5gum = "0.5.7"
+regex = { version = "1.10.6", features = ["std", "perf", "unicode-perl"], default-features = false }
+data-url = "0.3.1"
+bytes = "1.7.1"

 # Cache function results (Used for version check and favicon fetching)
-cached = "0.44.0"
+cached = { version = "0.53.1", features = ["async"] }

 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.16.2"
-cookie_store = "0.19.1"
+cookie = "0.18.1"
+cookie_store = "0.21.0"

 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.55"
+openssl = "0.10.66"

 # CLI argument parsing
 pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.13"
-governor = "0.5.1"
+paste = "1.0.15"
+governor = "0.6.3"

 # Check client versions for specific features.
-semver = "1.0.17"
+semver = "1.0.23"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
-which = "4.4.0"
+mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
+which = "6.0.2"

 # Argon2 library with support for the PHC format
-argon2 = "0.5.0"
+argon2 = "0.5.3"

 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.2.0"
+rpassword = "7.3.1"

-[patch.crates-io]
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
-# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch

 # Strip debuginfo from the release builds
-# Also enable thin LTO for some optimizations
+# The symbols are there to provide better panic traces
+# Also enable fat LTO and use 1 codegen unit for optimizations
 [profile.release]
 strip = "debuginfo"
-lto = "thin"
+lto = "fat"
+codegen-units = 1
+
+# A little bit of a speedup
+[profile.dev]
+split-debuginfo = "unpacked"

 # Always build argon2 using opt-level 3
 # This is a huge speed improvement during testing
 [profile.dev.package.argon2]
 opt-level = 3

-# A little bit of a speedup
-[profile.dev]
-split-debuginfo = "unpacked"
+# Optimize for size
+[profile.release-micro]
+inherits = "release"
+opt-level = "z"
+strip = "symbols"
+lto = "fat"
+codegen-units = 1
+panic = "abort"
+
+# Profile for systems with low resources
+# It will use less resources during build
+[profile.release-low]
+inherits = "release"
+strip = "symbols"
+lto = "thin"
+codegen-units = 16
+
+# Linting config
+[lints.rust]
+# Forbid
+unsafe_code = "forbid"
+non_ascii_idents = "forbid"
+
+# Deny
+future_incompatible = { level = "deny", priority = -1 }
+noop_method_call = "deny"
+rust_2018_idioms = { level = "deny", priority = -1 }
+rust_2021_compatibility = { level = "deny", priority = -1 }
+trivial_casts = "deny"
+trivial_numeric_casts = "deny"
+unused = { level = "deny", priority = -1 }
+unused_import_braces = "deny"
+unused_lifetimes = "deny"
+deprecated_in_future = "deny"
+
+[lints.clippy]
+# Allow
+# We need this since Rust v1.76+, since it has some bugs
+# https://github.com/rust-lang/rust-clippy/issues/12016
+blocks_in_conditions = "allow"
+
+# Deny
+cast_lossless = "deny"
+clone_on_ref_ptr = "deny"
+equatable_if_let = "deny"
+float_cmp_const = "deny"
+inefficient_to_string = "deny"
+iter_on_empty_collections = "deny"
+iter_on_single_items = "deny"
+linkedlist = "deny"
+macro_use_imports = "deny"
+manual_assert = "deny"
+manual_instant_elapsed = "deny"
+manual_string_new = "deny"
+match_wildcard_for_single_variants = "deny"
+mem_forget = "deny"
+needless_lifetimes = "deny"
+string_add_assign = "deny"
+string_to_string = "deny"
+unnecessary_join = "deny"
+unnecessary_self_imports = "deny"
+unused_async = "deny"
+verbose_file_reads = "deny"
+zero_sized_map_values = "deny"
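The two new profiles are selectable straight from the command line, since Cargo accepts custom profiles via `--profile`. For example (the feature selection here is illustrative):

```bash
# Size-optimized build: opt-level "z", fat LTO, panic=abort, symbols stripped.
cargo build --profile release-micro --features sqlite

# Gentler build for low-resource hosts: thin LTO, 16 codegen units.
cargo build --profile release-low --features sqlite,mysql,postgresql
```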
@@ -1 +1 @@
-docker/amd64/Dockerfile
+docker/Dockerfile.debian
@@ -42,7 +42,7 @@ docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p
 ```
 This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.

-**IMPORTANT**: Most modern web browsers, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.

 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).

@@ -92,4 +92,11 @@ Thanks for your contribution to the project!
         </a>
       </td>
     </tr>
+    <tr>
+      <td align="center">
+        <a href="https://github.com/IQ333777" style="width: 75px">
+          <sub><b>IQ333777</b></sub>
+        </a>
+      </td>
+    </tr>
 </table>
build.rs (26 changes)

@@ -17,8 +17,22 @@ fn main() {
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );

+    // Use check-cfg to let cargo know which cfg's we define,
+    // and avoid warnings when they are used in the code.
+    println!("cargo::rustc-check-cfg=cfg(sqlite)");
+    println!("cargo::rustc-check-cfg=cfg(mysql)");
+    println!("cargo::rustc-check-cfg=cfg(postgresql)");
+    println!("cargo::rustc-check-cfg=cfg(query_logger)");
+
+    // Rerun when these paths are changed.
+    // Someone could have checked-out a tag or specific commit, but no other files changed.
+    println!("cargo:rerun-if-changed=.git");
+    println!("cargo:rerun-if-changed=.git/HEAD");
+    println!("cargo:rerun-if-changed=.git/index");
+    println!("cargo:rerun-if-changed=.git/refs/tags");
+
     #[cfg(all(not(debug_assertions), feature = "query_logger"))]
-    compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
+    compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");

     // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
     // If neither exist, read from git.
@@ -42,11 +56,11 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {

 /// This method reads info from Git, namely tags, branch, and revision
 /// To access these values, use:
-///    - env!("GIT_EXACT_TAG")
-///    - env!("GIT_LAST_TAG")
-///    - env!("GIT_BRANCH")
-///    - env!("GIT_REV")
-///    - env!("VW_VERSION")
+///    - `env!("GIT_EXACT_TAG")`
+///    - `env!("GIT_LAST_TAG")`
+///    - `env!("GIT_BRANCH")`
+///    - `env!("GIT_REV")`
+///    - `env!("VW_VERSION")`
 fn version_from_git_info() -> Result<String, std::io::Error> {
     // The exact tag for the current commit, can be empty when
     // the current commit doesn't have an associated tag
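Because build.rs consults `$VW_VERSION` (or the legacy `$BWRS_VERSION`) before falling back to git, builds from a source tarball without a `.git` directory can still carry a proper version. A sketch (the version string is illustrative):

```bash
# Build outside a git checkout, supplying the version explicitly.
VW_VERSION=1.32.0 cargo build --release --features sqlite
```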
docker/DockerSettings.yaml (new file, 28 lines)

@@ -0,0 +1,28 @@
+---
+vault_version: "v2024.6.2b"
+vault_image_digest: "sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55"
+# Cross Compile Docker Helper Scripts v1.4.0
+# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
+xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
+rust_version: 1.80.1 # Rust version to be used
+debian_version: bookworm # Debian release name to be used
+alpine_version: "3.20" # Alpine version to be used
+# For which platforms/architectures will we try to build images
+platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
+# Determine the build images per OS/Arch
+build_stage_image:
+  debian:
+    image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}"
+    platform: "$BUILDPLATFORM"
+  alpine:
+    image: "build_${TARGETARCH}${TARGETVARIANT}"
+    platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
+    arch_image:
+      amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
+      arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"
+      armv7: "ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}"
+      armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}"
+# The final image which will be used to distribute the container images
+runtime_stage_image:
+  debian: "docker.io/library/debian:{{debian_version}}-slim"
+  alpine: "docker.io/library/alpine:{{alpine_version}}"
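As the headers of the generated Dockerfiles state, these settings feed a Jinja2 template; the Dockerfiles themselves are not edited by hand. The intended loop, assuming the Makefile sits next to the template in `docker/`:

```bash
# Change a setting (e.g. rust_version), then regenerate both Dockerfiles.
cd docker
"${EDITOR:-vi}" DockerSettings.yaml
make   # renders Dockerfile.debian and Dockerfile.alpine from Dockerfile.j2
```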
docker/Dockerfile.alpine (new file, 158 lines)

@@ -0,0 +1,158 @@
+# syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
+# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
+
+# Using multistage build:
+# 	https://docs.docker.com/develop/develop-images/multistage-build/
+# 	https://whitfin.io/speeding-up-rust-docker-builds/
+
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2b
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2b
+#     [docker.io/vaultwarden/web-vault@sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55
+#     [docker.io/vaultwarden/web-vault:v2024.6.2b]
+#
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55 AS vault
+
+########################## ALPINE BUILD IMAGES ##########################
+## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
+## And for Alpine we define all build images here, they will only be loaded when actually used
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.80.1 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.80.1 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.80.1 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.80.1 AS build_armv6
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root" \
+    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
+    # Debian Bookworm already contains libpq v15
+    PQ_LIB_DIR="/usr/local/musl/pq15/lib"
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Environment variables for Cargo on Alpine based builds
+RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+    # Output the current contents of the file
+    cat /env-cargo
+
+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+ARG VW_VERSION
+
+# Builds again, this time it will be the actual source files being built
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
+    # Create a symlink to the binary target folder to easily copy the binary in the final stage
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs
+
+# Create data folder and Install needed libraries
+RUN mkdir /data && \
+    apk --no-cache add \
+        ca-certificates \
+        curl \
+        openssl \
+        tzdata
+
+VOLUME /data
+EXPOSE 80
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+
+COPY docker/healthcheck.sh docker/start.sh /
+
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+CMD ["/start.sh"]
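The runtime-stage comments above spell out the prerequisite for building non-native images locally: registered qemu binfmt handlers. Once those are in place, buildx can target any of the listed platforms from the generated file; a sketch (the tag is illustrative):

```bash
# One-time: register qemu handlers for arm/arm64 (same image the CI workflow uses).
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm

# Build an arm64 Alpine-based image from the generated Dockerfile.
docker buildx build -f docker/Dockerfile.alpine --platform linux/arm64 -t vaultwarden:local-arm64 .
```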
@@ -1,34 +0,0 @@
-# syntax=docker/dockerfile:1
-
-# The cross-built images have the build arch (`amd64`) embedded in the image
-# manifest, rather than the target arch. For example:
-#
-#   $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
-#   amd64
-#
-# Recent versions of Docker have started printing a warning when the image's
-# claimed arch doesn't match the host arch. For example:
-#
-#   WARNING: The requested image's platform (linux/amd64) does not match the
-#   detected host platform (linux/arm/v7) and no specific platform was requested
-#
-# The image still works fine, but the spurious warning creates confusion.
-#
-# Docker doesn't seem to provide a way to directly set the arch of an image
-# at build time. To resolve the build vs. target arch discrepancy, we use
-# Docker Buildx to build a new set of images with the correct target arch.
-#
-# Docker Buildx uses this Dockerfile to build an image for each requested
-# platform. Since the Dockerfile basically consists of a single `FROM`
-# instruction, we're effectively telling Buildx to build a platform-specific
-# image by simply copying the existing cross-built image and setting the
-# correct target arch as a side effect.
-#
-# References:
-#
-# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
-# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
-# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
-#
-ARG LOCAL_REPO
-ARG DOCKER_TAG
-FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
201
docker/Dockerfile.debian
Normal file
201
docker/Dockerfile.debian
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
|
||||||
|
|
||||||
|
# This file was generated using a Jinja2 template.
|
||||||
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
||||||
|
|
||||||
|
# Using multistage build:
|
||||||
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
|
|
||||||
|
####################### VAULT BUILD IMAGE #######################
|
||||||
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
|
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2b
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2b
#     [docker.io/vaultwarden/web-vault@sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55
#     [docker.io/vaultwarden/web-vault:v2024.6.2b]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:25a8b48792a3d2c16ddaea493eee93a0b6785648f2d8782c7537d198cb41be55 AS vault

########################## Cross Compile Docker Helper Scripts ##########################
## We use linux/amd64 no matter which Build Platform, since these are all bash scripts
## and these bash scripts do not have any significant differences, if any at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.80.1-slim-bookworm AS build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Install clang to get `xx-cargo` working
# Install pkg-config to allow amd64 builds to find all libraries
# Install git so build.rs can determine the correct version
# Install the libc cross packages based upon the debian-arch
RUN apt-get update && \
    apt-get install -y \
        --no-install-recommends \
        clang \
        pkg-config \
        git \
        "libc6-$(xx-info debian-arch)-cross" \
        "libc6-dev-$(xx-info debian-arch)-cross" \
        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
    xx-apt-get install -y \
        --no-install-recommends \
        gcc \
        libmariadb3 \
        libpq-dev \
        libpq5 \
        libssl-dev \
        zlib1g-dev && \
    # Force install arch dependent mariadb dev packages
    # Installing them the normal way breaks several other packages (again)
    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
    dpkg --force-all -i ./libmariadb-dev*.deb && \
    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" && \
    rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
    ARCH_OPENSSL_INCLUDE_DIR

RUN source /env-cargo && \
    if xx-info is-cross ; then \
        # Some special variables if needed to override some build paths
        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
        fi && \
        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
        # Because of this we generate the needed environment variables here which we can load in the needed steps.
        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
        echo "export CROSS_COMPILE=1" >> /env-cargo && \
        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
    fi && \
    # Output the current contents of the file
    cat /env-cargo
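
# As an illustration (an assumed example, not literal output of this file), when
# cross-building for linux/arm64 the generated /env-cargo would contain roughly:
#   export CARGO_TARGET=aarch64-unknown-linux-gnu
#   export CC_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-gcc
#   export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
#   export PKG_CONFIG=/usr/bin/aarch64-linux-gnu-pkg-config
#   export CROSS_COMPILE=1
#   export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu
#   export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu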

RUN source /env-cargo && \
    rustup target add "${CARGO_TARGET}"

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./

ARG CARGO_PROFILE=release

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN source /env-cargo && \
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

ARG VW_VERSION

# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
    # Make sure that we actually build the project by updating the src/main.rs timestamp
    # Also do this for build.rs to ensure the version is rechecked
    touch build.rs src/main.rs && \
    # Create a symlink to the binary target folder to easily copy the binary in the final stage
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
    else \
        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
    fi
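
# For reference, the ARGs above can be overridden at build time; an illustrative
# (not prescriptive) invocation would look like:
#   docker build -f docker/Dockerfile.debian \
#     --build-arg CARGO_PROFILE=dev \
#     --build-arg DB=sqlite \
#     -t vaultwarden-local .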


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
#
# To build these images you need to have qemu binfmt support.
# See the following pages to help install these tools locally
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
#
# Or use a Docker image which modifies your host system to support this.
# The GitHub Actions Workflow uses the same image as used below.
# See: https://github.com/tonistiigi/binfmt
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    DEBIAN_FRONTEND=noninteractive

# Create data folder and Install needed libraries
RUN mkdir /data && \
    apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /

COPY docker/healthcheck.sh docker/start.sh /

COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
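
# Illustrative usage after a local build (the tag and host paths are examples,
# not fixed values):
#   docker run -d --name vaultwarden \
#     -v ./vw-data/:/data/ -p 8080:80 vaultwarden-local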

@@ -1,68 +1,15 @@
 # syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
+
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
-{% set rust_version = "1.70.0" %}
-{% set debian_version = "bullseye" %}
-{% set alpine_version = "3.17" %}
-{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
-{% if "alpine" in target_file %}
-{% if "amd64" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
-{% set package_arch_target = "x86_64-unknown-linux-musl" %}
-{% elif "armv7" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
-{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
-{% elif "armv6" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
-{% set package_arch_target = "arm-unknown-linux-musleabi" %}
-{% elif "arm64" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
-{% set package_arch_target = "aarch64-unknown-linux-musl" %}
-{% endif %}
-{% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
-{% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
-{% set package_arch_name = "arm64" %}
-{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
-{% set package_cross_compiler = "aarch64-linux-gnu" %}
-{% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
-{% set package_arch_name = "armel" %}
-{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
-{% set package_cross_compiler = "arm-linux-gnueabi" %}
-{% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
-{% set package_arch_name = "armhf" %}
-{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
-{% set package_cross_compiler = "arm-linux-gnueabihf" %}
-{% endif %}
-{% if package_arch_name is defined %}
-{% set package_arch_prefix = ":" + package_arch_name %}
-{% else %}
-{% set package_arch_prefix = "" %}
-{% endif %}
-{% if package_arch_target is defined %}
-{% set package_arch_target_param = " --target=" + package_arch_target %}
-{% else %}
-{% set package_arch_target_param = "" %}
-{% endif %}
-{% if "buildkit" in target_file %}
-{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
-{% else %}
-{% set mount_rust_cache = "" %}
-{% endif %}
+# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
+# This will generate two Dockerfiles: `Dockerfile.debian` and `Dockerfile.alpine`
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "v2023.5.0" %}
-{% set vault_image_digest = "sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085" %}
+####################### VAULT BUILD IMAGE #######################
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -80,10 +27,33 @@
 # $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
 # [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault

-########################## BUILD IMAGE ##########################
-FROM {{ build_stage_base_image }} as build
+{% if base == "debian" %}
+########################## Cross Compile Docker Helper Scripts ##########################
+## We use linux/amd64 no matter which Build Platform, since these are all bash scripts
+## and these bash scripts do not have any significant differences, if any at all
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
+{% elif base == "alpine" %}
+########################## ALPINE BUILD IMAGES ##########################
+## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
+## And for Alpine we define all build images here, they will only be loaded when actually used
+{% for arch in build_stage_image[base].arch_image %}
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
+{% endfor %}
+{% endif %}
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build
+{% if base == "debian" %}
+COPY --from=xx / /
+{% endif %}
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -92,152 +62,183 @@ ENV DEBIAN_FRONTEND=noninteractive \
     TERM=xterm-256color \
     CARGO_HOME="/root/.cargo" \
     USER="root"
+{%- if base == "alpine" %} \
+    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
+    # Debian Bookworm already contains libpq v15
+    PQ_LIB_DIR="/usr/local/musl/pq15/lib"
+{% endif %}
+
+{% if base == "debian" %}
+
+# Install clang to get `xx-cargo` working
+# Install pkg-config to allow amd64 builds to find all libraries
+# Install git so build.rs can determine the correct version
+# Install the libc cross packages based upon the debian-arch
+RUN apt-get update && \
+    apt-get install -y \
+        --no-install-recommends \
+        clang \
+        pkg-config \
+        git \
+        "libc6-$(xx-info debian-arch)-cross" \
+        "libc6-dev-$(xx-info debian-arch)-cross" \
+        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
+    xx-apt-get install -y \
+        --no-install-recommends \
+        gcc \
+        libmariadb3 \
+        libpq-dev \
+        libpq5 \
+        libssl-dev \
+        zlib1g-dev && \
+    # Force install arch dependent mariadb dev packages
+    # Installing them the normal way breaks several other packages (again)
+    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
+    dpkg --force-all -i ./libmariadb-dev*.deb && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
+{% endif %}

 # Create CARGO_HOME folder and don't download rust docs
-RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

-{% if "alpine" in target_file %}
-{% if "armv6" in target_file %}
-# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
-ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
-{% endif %}
-{% elif "arm" in target_file %}
-# Install build dependencies for the {{ package_arch_name }} architecture
-RUN dpkg --add-architecture {{ package_arch_name }} \
-    && apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-{{ package_cross_compiler }} \
-        libc6-dev{{ package_arch_prefix }} \
-        libmariadb-dev{{ package_arch_prefix }} \
-        libmariadb-dev-compat{{ package_arch_prefix }} \
-        libmariadb3{{ package_arch_prefix }} \
-        libpq-dev{{ package_arch_prefix }} \
-        libpq5{{ package_arch_prefix }} \
-        libssl-dev{{ package_arch_prefix }} \
-    #
-    # Make sure cargo has the right target config
-    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
-    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
-    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"

-# Set arm specific environment values
-ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
-    CROSS_COMPILE="1" \
-    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
-    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
-{% elif "amd64" in target_file %}
-# Install build dependencies
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libmariadb-dev \
-        libpq-dev
-{% endif %}

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
+{% if base == "debian" %}
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
+
+RUN source /env-cargo && \
+    if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
+        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
+        # Because of this we generate the needed environment variables here which we can load in the needed steps.
+        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
+        echo "export CROSS_COMPILE=1" >> /env-cargo && \
+        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
+        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    fi && \
+    # Output the current contents of the file
+    cat /env-cargo
+
+{% elif base == "alpine" %}
+# Environment variables for Cargo on Alpine based builds
+RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+    # Output the current contents of the file
+    cat /env-cargo

-{% if package_arch_target is defined %}
-RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
 {% endif %}
+
+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+
+ARG CARGO_PROFILE=release

 # Configure the DB ARG as late as possible to not invalidate the cached layers above
-{% if "alpine" in target_file %}
+{% if base == "debian" %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-{% else %}
-ARG DB=sqlite,mysql,postgresql
 {% endif %}

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
-    && find . -not -path "./target*" -delete
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

-# Make sure that we actually build the project
-RUN touch src/main.rs
+ARG VW_VERSION

-# Builds again, this time it'll just be
-# your actual source files being built
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
+# Builds again, this time it will be the actual source files being built
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
+    # Create a symlink to the binary target folder to easily copy the binary in the final stage
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM {{ runtime_stage_base_image }}
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
-{%- if "alpine" in runtime_stage_base_image %} \
+{%- if base == "debian" %} \
+    DEBIAN_FRONTEND=noninteractive
+{% elif base == "alpine" %} \
     SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}

-{% if "amd64" not in target_file %}
-RUN [ "cross-build-start" ]
-{% endif %}

 # Create data folder and Install needed libraries
-RUN mkdir /data \
-{% if "alpine" in runtime_stage_base_image %}
-    && apk add --no-cache \
+RUN mkdir /data && \
+{% if base == "debian" %}
+    apt-get update && apt-get install -y \
+        --no-install-recommends \
+        ca-certificates \
+        curl \
+        libmariadb-dev-compat \
+        libpq5 \
+        openssl && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+{% elif base == "alpine" %}
+    apk --no-cache add \
         ca-certificates \
         curl \
         openssl \
         tzdata
-{% else %}
-    && apt-get update && apt-get install -y \
-        --no-install-recommends \
-        ca-certificates \
-        curl \
-        libmariadb-dev-compat \
-        libpq5 \
-        openssl \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-{% endif %}

-{% if "armv6" in target_file and "alpine" not in target_file %}
-# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
-# This symlink was there in the buster images, and for some reason this is needed.
-RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
-{% endif -%}

-{% if "amd64" not in target_file %}
-RUN [ "cross-build-end" ]
 {% endif %}

 VOLUME /data
 EXPOSE 80
-EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY --from=vault /web-vault ./web-vault
-{% if package_arch_target is defined %}
-COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
-{% else %}
-COPY --from=build /app/target/release/vaultwarden .
-{% endif %}

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /
+
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

@@ -1,15 +1,4 @@
-OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
+all:
+	./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
+	./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine

-all: $(OBJECTS)
+.PHONY: all

-%/Dockerfile: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.buildkit: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
docker/README.md
@@ -1,3 +1,188 @@
-The arch-specific directory names follow the arch identifiers used by the Docker official images:
-
-https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64

# Vaultwarden Container Building

To build and release new testing and stable releases of Vaultwarden we use `docker buildx bake`.<br>
This can be used locally by running the command yourself, but it is also used by GitHub Actions.

This makes it easier for us to test and maintain the different architectures we provide.<br>
We also have just two Dockerfiles: one for Debian and one for Alpine based images.<br>
With just these two files we can build both Debian and Alpine images for the following platforms:
- amd64 (linux/amd64)
- arm64 (linux/arm64)
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)

There are also some unsupported platforms for Debian based images. These are not built and tested by default, and are only provided to make it easier for users to build for these architectures.
- 386 (linux/386)
- ppc64le (linux/ppc64le)
- s390x (linux/s390x)

To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different than your host.<br>
This ensures the container build process can run binaries from other architectures.<br>
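As a quick sanity check that emulation is active, you can run a foreign-architecture binary directly (the image and platform here are just an illustration):
```bash
# With working binfmt support this prints "aarch64" even on an x86_64 host
docker run --rm --platform=linux/arm64 docker.io/library/alpine uname -m
```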

**NOTE**: Run all the examples below from the root of the repo.<br>


## How to install QEMU binfmt support

This is different per host OS, but most support this in some way.<br>

### Ubuntu/Debian
```bash
apt install binfmt-support qemu-user-static
```

### Arch Linux (others based upon it)
```bash
pacman -S qemu-user-static qemu-user-static-binfmt
```

### Fedora
```bash
dnf install qemu-user-static
```

### Others
There is also an option to use another Docker container to provide support for this.
```bash
# To install and activate
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
```


## Single architecture container building

You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>

```bash
# Default bake triggers a Debian build using the host's architecture
docker buildx bake --file docker/docker-bake.hcl

# Bake Debian ARM64 using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl debian-arm64

# Bake Alpine ARMv6 as a release build
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl alpine-armv6
```


## Local Multi Architecture container building

Start the initialization, this only needs to be done once.

```bash
# Create and use a new buildx builder instance which connects to the host network
docker buildx create --name vaultwarden --use --driver-opt network=host

# Validate it runs
docker buildx inspect --bootstrap

# Create a local container registry directly reachable on the localhost
docker run -d --name registry --network host registry:2
```

After that is done, you should be able to build and push to the local registry.<br>
Use the following command with the modified variables to bake the Alpine images.<br>
Replace `alpine` with `debian` if you want to build the debian multi arch images.

```bash
# Start a buildx bake using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
docker buildx bake --file docker/docker-bake.hcl alpine-multi
```


## Using the `bake.sh` script

To make it a bit easier to trigger a build, there is also a `bake.sh` script.<br>
This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables; a sketch of that generation follows below.<br>
This script can be called from either the repo root or the docker directory.
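
As a rough sketch (an approximation, not the script's exact contents), the variable generation can be reproduced with plain git commands:
```bash
# Approximation of the variables bake.sh derives before calling docker buildx bake
SOURCE_COMMIT="$(git rev-parse HEAD)"
# The exact tag if HEAD is tagged, else the last tag plus the first 8 chars of the commit
SOURCE_VERSION="$(git describe --tags --exact-match 2>/dev/null \
  || echo "$(git describe --tags --abbrev=0)-$(git rev-parse --short=8 HEAD)")"

SOURCE_COMMIT="${SOURCE_COMMIT}" SOURCE_VERSION="${SOURCE_VERSION}" \
docker buildx bake --file docker/docker-bake.hcl debian
```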

So, if you want to build a Multi Arch Alpine container pushing to your localhost registry you can run this from within the docker directory. (Just make sure you executed the initialization steps above first)
```bash
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
./bake.sh alpine-multi
```

Or if you want to just build a Debian container from the repo root, you can run this.
```bash
docker/bake.sh
```

You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
This will also append those values to the tag so you can see the built container when running `docker images`.

You can also append extra arguments after the target if you want. This can be useful for example to print what bake will use.
```bash
docker/bake.sh alpine-all --print
```

### Testing baked images

To test these images you can run them by using the correct tag and providing the platform.<br>
For example, after you have built an arm64 image via `./bake.sh debian-arm64` you can run:
```bash
docker run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  vaultwarden/server:testing-arm64
```


## Using the `podman-bake.sh` script

To make building easier using podman as well, there is a `podman-bake.sh` script.<br>
This script calls `podman buildx build` with the needed parameters and, the same as `bake.sh`, it will generate some variables automatically; a sketch of the resulting invocation follows below.<br>
This script can be called from either the repo root or the docker directory.

**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES`, and a single `BASE_TAGS` value, no comma separated values. It also only supports building separate architectures, no Multi Arch containers.
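
Under those constraints, the underlying call is roughly equivalent to something like this (the flags shown are an approximation, not the script verbatim):
```bash
# Roughly what podman-bake.sh ends up running for an Alpine arm64 build
podman build \
  --platform=linux/arm64 \
  --build-arg DB="sqlite,enable_mimalloc" \
  --build-arg SOURCE_COMMIT="$(git rev-parse HEAD)" \
  --tag localhost/vaultwarden/server:testing-arm64 \
  --file docker/Dockerfile.alpine .
```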

To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
```bash
DB="sqlite,enable_mimalloc" \
./podman-bake.sh alpine-arm64
```

Or if you want to just build a Debian container from the repo root, you can run this.
```bash
docker/podman-bake.sh
```

You can append extra arguments after the target if you want. This can be useful for example to disable the cache like this.
```bash
./podman-bake.sh alpine-arm64 --no-cache
```

For the podman builds you can, just like the `bake.sh` script, also append the architecture to build for that specific platform.<br>

### Testing podman built images

The command to start a podman built container is almost the same as for the docker/bake built containers. The images start with `localhost/`, so you need to prepend that.

```bash
podman run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  localhost/vaultwarden/server:testing-arm64
```


## Variables supported

| Variable              | default            | description |
| --------------------- | ------------------ | ----------- |
| CARGO_PROFILE         | null               | Which cargo profile to use. `null` means what is defined in the Dockerfile |
| DB                    | null               | Which `features` to build. `null` means what is defined in the Dockerfile |
| SOURCE_REPOSITORY_URL | null               | The source repository from which this build is triggered |
| SOURCE_COMMIT         | null               | The commit hash of the current commit for this build |
| SOURCE_VERSION        | null               | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
| BASE_TAGS             | testing            | Tags to be used. Can be a comma separated value like "latest,1.29.2" |
| CONTAINER_REGISTRIES  | vaultwarden/server | Comma separated value of container registries. Like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
| VW_VERSION            | null               | To override the `SOURCE_VERSION` value. This is also used by the `build.rs` code for example |
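
Putting several of these together, a fully parameterized release bake could look like this (the tag and registry values are examples only):
```bash
# Release build of the Debian multi arch images, tagged and pushed to two registries
BASE_TAGS="latest,1.30.0" \
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker/bake.sh debian-multi
```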
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,112 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,112 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
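The two-pass build in the file above is the usual Rust-in-Docker dependency-caching trick: `cargo new` makes a throwaway crate, only the manifests are copied in, and the first `cargo build` compiles every dependency into `target/`, a layer Docker can reuse until Cargo.toml or Cargo.lock change. A condensed sketch of the same sequence (the /src checkout path is hypothetical):

# Sketch of the dependency-caching pattern the Dockerfile encodes.
cargo new --bin /app && cd /app         # dummy crate with a stub main.rs
cp /src/Cargo.toml /src/Cargo.lock .    # manifests only, no sources yet
cargo build --release                   # compiles all dependencies
find . -not -path "./target*" -delete   # drop the stub, keep compiled deps
cp -R /src/. .                          # now copy the real project in
touch src/main.rs                       # make cargo rebuild the binary itself
cargo build --release                   # only project code compiles here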
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    libc6-dev:arm64 \
    libmariadb-dev:arm64 \
    libmariadb-dev-compat:arm64 \
    libmariadb3:arm64 \
    libpq-dev:arm64 \
    libpq5:arm64 \
    libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
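The three `echo` commands in the build stage above append a cross-target section to ${CARGO_HOME}/config; written out, the TOML that Cargo ends up reading is simply the following (a heredoc produces the same bytes as the echos):

# Equivalent to the three echo lines: point Cargo at the aarch64 linker
# and library search path for the glibc cross target.
cat >> "${CARGO_HOME}/config" <<'EOF'
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
rustflags = ["-L/usr/lib/aarch64-linux-gnu"]
EOF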
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
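The `RUN [ "cross-build-start" ]` / `RUN [ "cross-build-end" ]` pair in the runtime stages are helpers shipped in balenalib base images: between the markers, RUN steps are executed through a bundled QEMU shim, which is what lets an aarch64 apk or apt step run on an x86_64 build host. Under that assumption a plain build invocation suffices (file path and tag are illustrative):

# Hypothetical invocation; the QEMU interposition happens inside the
# balenalib image itself, so the RUN steps need no extra --platform plumbing.
docker build -f docker/arm64/Dockerfile -t vaultwarden:arm64-test .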
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    libc6-dev:arm64 \
    libmariadb-dev:arm64 \
    libmariadb-dev-compat:arm64 \
    libmariadb3:arm64 \
    libpq-dev:arm64 \
    libpq5:arm64 \
    libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
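Relative to the file before it, this variant threads `--mount=type=cache` through every cargo-related RUN. Those are BuildKit cache mounts: the cargo registry and git caches persist on the builder across runs instead of being baked into image layers, but they only work when BuildKit drives the build (invocations illustrative):

# Either enable BuildKit for the classic builder...
DOCKER_BUILDKIT=1 docker build -t vaultwarden:test .
# ...or use buildx, which always runs under BuildKit.
docker buildx build --load -t vaultwarden:test .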
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
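In all of these files `ARG DB=...` picks the cargo feature set and deliberately sits after the toolchain layers, so changing it does not invalidate them. It can be overridden per build; for instance, an SQLite-only Alpine image could plausibly be produced with (tag hypothetical):

# Override the default feature list at build time.
DOCKER_BUILDKIT=1 docker build --build-arg DB=sqlite,enable_mimalloc -t vaultwarden:sqlite-only .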
@@ -1,143 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    libc6-dev:armel \
    libmariadb-dev:armel \
    libmariadb-dev-compat:armel \
    libmariadb3:armel \
    libpq-dev:armel \
    libpq5:armel \
    libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
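The RUSTFLAGS line in the build stage above works around an armv6 quirk: with mimalloc enabled, the arm-unknown-linux-musleabi target needs atomic intrinsics from libatomic.a, and `-Clink-arg=` hands that archive directly to the linker. Outside Docker the same flag can be exercised like this (the musl toolchain path is the one assumed by the blackdex/rust-musl image):

# Pass libatomic.a through to the linker for a single cross build.
RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a' \
    cargo build --release --target=arm-unknown-linux-musleabi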
@@ -1,143 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    libc6-dev:armel \
    libmariadb-dev:armel \
    libmariadb-dev-compat:armel \
    libmariadb3:armel \
    libpq-dev:armel \
    libpq5:armel \
    libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
    --no-install-recommends \
    ca-certificates \
    curl \
    libmariadb-dev-compat \
    libpq5 \
    openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
    ca-certificates \
    curl \
    openssl \
    tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
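For reference, a per-arch Dockerfile like this (removed by this diff) could still be built directly with plain `docker build`; a minimal sketch, assuming the file lived at `docker/armv7/Dockerfile` (the path is not shown in this diff) and that it is run from the repository root:

# Illustrative only: the docker/armv7/Dockerfile path is an assumption.
docker build \
    --tag vaultwarden:armv7-debian \
    --file docker/armv7/Dockerfile \
    .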
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
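The `RUN --mount=type=cache` instructions in this variant only work when BuildKit is enabled. A hedged sketch of how such a build would have been invoked (the `docker/armv7/Dockerfile.buildkit` path is an assumption based on the hooks/build script further below, which appends a `.buildkit` suffix when DOCKER_BUILDKIT is set):

# Illustrative only: cargo cache mounts require BuildKit; the file path is assumed.
DOCKER_BUILDKIT=1 docker build \
    --tag vaultwarden:armv7-debian \
    --file docker/armv7/Dockerfile.buildkit \
    .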
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
15
docker/bake.sh
Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env vars
source "${BASEDIR}/bake_env.sh"

# Be verbose on what is being executed
set -x

# Make sure we set the context to `..` so it will go up one directory
docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
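Since the script forwards its arguments straight to `docker buildx bake`, any target or group defined in the docker-bake.hcl file added below can be passed through, and HCL variables can be overridden via the environment; a minimal usage sketch:

# Build the Debian image for the host platform (the "default" group)
./docker/bake.sh

# Build a single platform target, overriding the DB features variable
DB="sqlite" ./docker/bake.sh debian-armv7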
33
docker/bake_env.sh
Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# If SOURCE_COMMIT is provided via env skip this
if [ -z "${SOURCE_COMMIT+x}" ]; then
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# If VW_VERSION is provided via env use it as SOURCE_VERSION
# Else define it using git
if [[ -n "${VW_VERSION}" ]]; then
    SOURCE_VERSION="${VW_VERSION}"
else
    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
    if [[ -n "${GIT_EXACT_TAG}" ]]; then
        SOURCE_VERSION="${GIT_EXACT_TAG}"
    else
        GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
        SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
        GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
        case "${GIT_BRANCH}" in
            main|master|HEAD)
                # Do not add the branch name for these branches
                ;;
            *)
                SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
                ;;
        esac
    fi
fi

# Export the rendered variables above so bake will use them
export SOURCE_COMMIT
export SOURCE_VERSION
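To see what this script derives without building anything, it can simply be sourced from a checkout; a minimal sketch (the example version string is hypothetical):

# Illustrative: print the values bake would use for the current checkout
source docker/bake_env.sh
echo "SOURCE_COMMIT=${SOURCE_COMMIT}"
echo "SOURCE_VERSION=${SOURCE_VERSION}"   # e.g. "1.29.2-abcdef12 (my-branch)" on an untagged feature branch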
269
docker/docker-bake.hcl
Normal file
@@ -0,0 +1,269 @@
// ==== Baking Variables ====

// Set which cargo profile to use, dev or release for example
// Use the value provided in the Dockerfile as default
variable "CARGO_PROFILE" {
  default = null
}

// Set which DB's (features) to enable
// Use the value provided in the Dockerfile as default
variable "DB" {
  default = null
}

// The repository this build was triggered from
variable "SOURCE_REPOSITORY_URL" {
  default = null
}

// The hash of the current commit this build was triggered on
variable "SOURCE_COMMIT" {
  default = null
}

// The version of this build
// Typically the current exact tag of this commit,
// else the last tag and the first 8 characters of the source commit
variable "SOURCE_VERSION" {
  default = null
}

// This can be used to overwrite SOURCE_VERSION
// It will be used during the build.rs building stage
variable "VW_VERSION" {
  default = null
}

// The base tag(s) to use
// This can be a comma separated value like "testing,1.29.2"
variable "BASE_TAGS" {
  default = "testing"
}

// Which container registries should be used for the tagging
// This can be a comma separated value
// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
variable "CONTAINER_REGISTRIES" {
  default = "vaultwarden/server"
}


// ==== Baking Groups ====

group "default" {
  targets = ["debian"]
}


// ==== Shared Baking ====
function "labels" {
  params = []
  result = {
    "org.opencontainers.image.description" = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
    "org.opencontainers.image.licenses" = "AGPL-3.0-only"
    "org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
    "org.opencontainers.image.url" = "https://github.com/dani-garcia/vaultwarden"
    "org.opencontainers.image.created" = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
    "org.opencontainers.image.source" = "${SOURCE_REPOSITORY_URL}"
    "org.opencontainers.image.revision" = "${SOURCE_COMMIT}"
    "org.opencontainers.image.version" = "${SOURCE_VERSION}"
  }
}

target "_default_attributes" {
  labels = labels()
  args = {
    DB = "${DB}"
    CARGO_PROFILE = "${CARGO_PROFILE}"
    VW_VERSION = "${VW_VERSION}"
  }
}


// ==== Debian Baking ====

// Default Debian target, will build a container using the hosts platform architecture
target "debian" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.debian"
  tags = generate_tags("", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "debian-multi" {
  inherits = ["debian"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "debian-amd64" {
  inherits = ["debian"]
  platforms = ["linux/amd64"]
  tags = generate_tags("", "-amd64")
}

target "debian-arm64" {
  inherits = ["debian"]
  platforms = ["linux/arm64"]
  tags = generate_tags("", "-arm64")
}

target "debian-armv7" {
  inherits = ["debian"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("", "-armv7")
}

target "debian-armv6" {
  inherits = ["debian"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("", "-armv6")
}

// ==== Start of unsupported Debian architecture targets ===
// These are provided just to help users build for these rare platforms
// They will not be built by default
target "debian-386" {
  inherits = ["debian"]
  platforms = ["linux/386"]
  tags = generate_tags("", "-386")
  args = {
    ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
  }
}

target "debian-ppc64le" {
  inherits = ["debian"]
  platforms = ["linux/ppc64le"]
  tags = generate_tags("", "-ppc64le")
  args = {
    ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
  }
}

target "debian-s390x" {
  inherits = ["debian"]
  platforms = ["linux/s390x"]
  tags = generate_tags("", "-s390x")
  args = {
    ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
  }
}
// ==== End of unsupported Debian architecture targets ===

// A Group to build all platforms individually for local testing
group "debian-all" {
  targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
}


// ==== Alpine Baking ====

// Default Alpine target, will build a container using the hosts platform architecture
target "alpine" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.alpine"
  tags = generate_tags("-alpine", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "alpine-multi" {
  inherits = ["alpine"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("-alpine", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "alpine-amd64" {
  inherits = ["alpine"]
  platforms = ["linux/amd64"]
  tags = generate_tags("-alpine", "-amd64")
}

target "alpine-arm64" {
  inherits = ["alpine"]
  platforms = ["linux/arm64"]
  tags = generate_tags("-alpine", "-arm64")
}

target "alpine-armv7" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("-alpine", "-armv7")
}

target "alpine-armv6" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("-alpine", "-armv6")
}

// A Group to build all platforms individually for local testing
group "alpine-all" {
  targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
}


// ==== Bake everything locally ====

group "all" {
  targets = ["debian-all", "alpine-all"]
}


// ==== Baking functions ====

// This will return the local platform as amd64, arm64 or armv7 for example
// It can be used for creating a local image tag
function "platform_tag" {
  params = []
  result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}"
}


function "get_container_registries" {
  params = []
  result = flatten(split(",", CONTAINER_REGISTRIES))
}

function "get_base_tags" {
  params = []
  result = flatten(split(",", BASE_TAGS))
}

function "generate_tags" {
  params = [
    suffix,   // What to append to the BASE_TAG when needed, like `-alpine` for example
    platform  // the platform we are building for if needed
  ]
  result = flatten([
    for registry in get_container_registries() :
      [for base_tag in get_base_tags() :
        concat(
          # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
          base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
          # The default tagging strategy
          ["${registry}:${base_tag}${suffix}${platform}"]
        )
      ]
  ])
}

function "image_index_annotations" {
  params = []
  result = flatten([
    for key, value in labels() :
    value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : []
  ])
}
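Putting the variables, targets, and tag functions together, a hedged invocation sketch (the registry and tag values below are examples only, and a registry push target requires prior login):

# Push a multi-arch Alpine manifest, tagging it for two registries
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
BASE_TAGS="1.30.0,latest" \
./docker/bake.sh alpine-multi
# generate_tags would then emit e.g. ghcr.io/dani-garcia/vaultwarden:1.30.0-alpine,
# plus ghcr.io/dani-garcia/vaultwarden:alpine via the latest + -alpine special case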
@@ -1,16 +1,24 @@
-#!/bin/sh
+#!/usr/bin/env sh
 
 # Use the value of the corresponding env var (if present),
 # or a default value otherwise.
-: "${DATA_FOLDER:="data"}"
+: "${DATA_FOLDER:="/data"}"
 : "${ROCKET_PORT:="80"}"
+: "${ENV_FILE:="/.env"}"
 
 CONFIG_FILE="${DATA_FOLDER}"/config.json
 
+# Check if the $ENV_FILE file exists and is readable
+# If that is the case, load it into the environment before running any check
+if [ -r "${ENV_FILE}" ]; then
+    # shellcheck disable=SC1090
+    . "${ENV_FILE}"
+fi
+
 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {
-    local key="$1"
+    key="$1"
     # Extract a line of the form:
     #     "domain": "https://bw.example.com/path",
     grep "\"${key}\":" "${CONFIG_FILE}" |
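Because the script now loads `/.env` and honors `DATA_FOLDER` from the environment, the probe can also be exercised by hand; a hedged sketch (the container name and data path are examples only):

# Illustrative: run the healthcheck inside a running container with a non-default data folder
docker exec --env DATA_FOLDER=/srv/vw-data vaultwarden /healthcheck.sh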
105
docker/podman-bake.sh
Executable file
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env vars
source "${BASEDIR}/bake_env.sh"

# Check if a target is given as first argument
# If not we assume the defaults and pass the given arguments to the podman command
case "${1}" in
    alpine*|debian*)
        TARGET="${1}"
        # Now shift the $@ array so we only have the rest of the arguments
        # This allows us to append these as extra arguments to the podman buildx build command
        shift
        ;;
esac

LABEL_ARGS=(
    --label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
    --label org.opencontainers.image.licenses="AGPL-3.0-only"
    --label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    --label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    --label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
)
if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
fi
if [[ -n "${SOURCE_COMMIT}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
fi
if [[ -n "${SOURCE_VERSION}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
fi

# Check if and which --build-arg arguments we need to configure
BUILD_ARGS=()
if [[ -n "${DB}" ]]; then
    BUILD_ARGS+=(--build-arg DB="${DB}")
fi
if [[ -n "${CARGO_PROFILE}" ]]; then
    BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
fi
if [[ -n "${VW_VERSION}" ]]; then
    BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
fi

# Set the default BASE_TAGS if none are provided
if [[ -z "${BASE_TAGS}" ]]; then
    BASE_TAGS="testing"
fi

# Set the default CONTAINER_REGISTRIES if none are provided
if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
    CONTAINER_REGISTRIES="vaultwarden/server"
fi

# Check which Dockerfile we need to use, default is debian
case "${TARGET}" in
    alpine*)
        BASE_TAGS="${BASE_TAGS}-alpine"
        DOCKERFILE="Dockerfile.alpine"
        ;;
    *)
        DOCKERFILE="Dockerfile.debian"
        ;;
esac

# Check which platform we need to build and append the BASE_TAGS with the architecture
case "${TARGET}" in
    *-arm64)
        BASE_TAGS="${BASE_TAGS}-arm64"
        PLATFORM="linux/arm64"
        ;;
    *-armv7)
        BASE_TAGS="${BASE_TAGS}-armv7"
        PLATFORM="linux/arm/v7"
        ;;
    *-armv6)
        BASE_TAGS="${BASE_TAGS}-armv6"
        PLATFORM="linux/arm/v6"
        ;;
    *)
        BASE_TAGS="${BASE_TAGS}-amd64"
        PLATFORM="linux/amd64"
        ;;
esac

# Be verbose on what is being executed
set -x

# Build the image with podman
# We use the docker format here since we are using `SHELL`, which is not supported by OCI
# shellcheck disable=SC2086
podman buildx build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
    --format=docker \
    "${LABEL_ARGS[@]}" \
    "${BUILD_ARGS[@]}" \
    --file="${BASEDIR}/${DOCKERFILE}" "$@" \
    "${BASEDIR}/.."
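Usage mirrors bake.sh; a minimal sketch, grounded in the target and env-var handling above:

# Build the Alpine armv7 image with podman, enabling only the sqlite backend
DB="sqlite" ./docker/podman-bake.sh alpine-armv7

# Anything after the target name is passed through to `podman buildx build`
./docker/podman-bake.sh debian-amd64 --no-cache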
@@ -1,17 +1,31 @@
 #!/usr/bin/env python3
 
-import os, argparse, json
+import os
+import argparse
+import json
+
+import yaml
 import jinja2
 
+# Load settings file
+with open("DockerSettings.yaml", 'r') as yaml_file:
+    yaml_data = yaml.safe_load(yaml_file)
+
+settings_env = jinja2.Environment(
+    loader=jinja2.FileSystemLoader(os.getcwd()),
+)
+settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data))
+
 args_parser = argparse.ArgumentParser()
 args_parser.add_argument('template_file', help='Jinja2 template file to render.')
 args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
 cli_args = args_parser.parse_args()
 
+# Merge the default config yaml with the json arguments given.
 render_vars = json.loads(cli_args.render_vars)
+settings_yaml.update(render_vars)
+
 environment = jinja2.Environment(
     loader=jinja2.FileSystemLoader(os.getcwd()),
     trim_blocks=True,
 )
-print(environment.get_template(cli_args.template_file).render(render_vars))
+print(environment.get_template(cli_args.template_file).render(settings_yaml))
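A hedged invocation sketch; the JSON keys the templates accept are defined in `Dockerfile.j2` and `DockerSettings.yaml` (not shown in this diff), so the `"base"` key below is an assumption:

# Illustrative only: render one Dockerfile from the shared Jinja2 template
./docker/render_template Dockerfile.j2 '{"base": "debian"}' > docker/Dockerfile.debian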
@@ -1,20 +0,0 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
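When testing these (now removed) hooks outside Docker Hub, the predefined variables had to be supplied by hand; a minimal sketch, with values that are examples only:

# Illustrative: simulate the Docker Hub build environment locally
export DOCKER_REPO="index.docker.io/vaultwarden/server"
export DOCKER_TAG="testing"
export SOURCE_REPOSITORY_URL="https://github.com/dani-garcia/vaultwarden"
./hooks/build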
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix
51
hooks/build
@@ -1,51 +0,0 @@
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildkit_suffix=.buildkit
fi

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
111
hooks/push
@@ -1,111 +0,0 @@
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }

set -ex

echo ">>> Starting local Docker registry when needed..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
# First check if there already is a registry container running, else skip it.
# This will only happen either locally or running it via Github Actions
#
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
    # defaults to port 5000
    docker run -d --name registry --network host registry:2
fi

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
# Check if there already is a builder running, else skip this and use the existing.
# This will only happen either locally or running it via Github Actions
#
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
    docker buildx create --name builder --use --driver-opt network=host
fi

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platform="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx
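After a push like the one above, the resulting manifest list can be checked with a standard buildx subcommand; a minimal sketch:

# Illustrative: list the per-arch entries behind the pushed multi-arch tag
docker buildx imagetools inspect "${DOCKER_REPO}:${DOCKER_TAG}"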
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                 CHAR(36) NOT NULL,
    organization_uuid         CHAR(36),
    request_device_identifier CHAR(36) NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        CHAR(36),
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
MODIFY master_password_hash TEXT;

ALTER TABLE auth_requests
MODIFY enc_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
2
migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql
Normal file
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN `key` TEXT;
@@ -0,0 +1 @@
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;
@@ -0,0 +1 @@
ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;
@@ -0,0 +1 @@
DROP TABLE twofactor_duo_ctx;
@@ -0,0 +1,8 @@
CREATE TABLE twofactor_duo_ctx (
    state      VARCHAR(64) NOT NULL,
    user_email VARCHAR(255) NOT NULL,
    nonce      VARCHAR(64) NOT NULL,
    exp        BIGINT NOT NULL,

    PRIMARY KEY (state)
);
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                 CHAR(36) NOT NULL,
    organization_uuid         CHAR(36),
    request_device_identifier CHAR(36) NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        CHAR(36),
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             TIMESTAMP NOT NULL,
    response_date             TIMESTAMP,
    authentication_date       TIMESTAMP,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
ALTER COLUMN master_password_hash DROP NOT NULL;

ALTER TABLE auth_requests
ALTER COLUMN enc_key DROP NOT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ALTER COLUMN file_size TYPE BIGINT,
ALTER COLUMN file_size SET NOT NULL;
@@ -0,0 +1,3 @@
ALTER TABLE twofactor
ALTER COLUMN last_used TYPE BIGINT,
ALTER COLUMN last_used SET NOT NULL;
@@ -0,0 +1 @@
DROP TABLE twofactor_duo_ctx;
@@ -0,0 +1,8 @@
CREATE TABLE twofactor_duo_ctx (
    state      VARCHAR(64) NOT NULL,
    user_email VARCHAR(255) NOT NULL,
    nonce      VARCHAR(64) NOT NULL,
    exp        BIGINT NOT NULL,

    PRIMARY KEY (state)
);
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      TEXT NOT NULL PRIMARY KEY,
    user_uuid                 TEXT NOT NULL,
    organization_uuid         TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        TEXT,
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,29 @@
-- Create new auth_requests table with master_password_hash as nullable column
CREATE TABLE auth_requests_new (
    uuid                      TEXT NOT NULL PRIMARY KEY,
    user_uuid                 TEXT NOT NULL,
    organization_uuid         TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        TEXT,
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT,
    master_password_hash      TEXT,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY (user_uuid) REFERENCES users (uuid),
    FOREIGN KEY (organization_uuid) REFERENCES organizations (uuid)
);

-- Transfer current data to new table
INSERT INTO auth_requests_new SELECT * FROM auth_requests;

-- Drop the old table
DROP TABLE auth_requests;

-- Rename the new table to the original name
ALTER TABLE auth_requests_new RENAME TO auth_requests;
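SQLite cannot relax a NOT NULL constraint in place, which is why this migration rebuilds the table instead of using ALTER COLUMN like the MySQL and PostgreSQL variants above. A hedged way to confirm the result (the database path is an example only):

# Illustrative: confirm enc_key and master_password_hash are now nullable
sqlite3 data/db.sqlite3 '.schema auth_requests'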
@@ -0,0 +1,2 @@
-- Add the external_id to the users_organizations table
ALTER TABLE "users_organizations" ADD COLUMN "external_id" TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1 @@
-- Integer size in SQLite is already i64, so we don't need to do anything
@@ -0,0 +1 @@
-- Integer size in SQLite is already i64, so we don't need to do anything
@@ -0,0 +1 @@
DROP TABLE twofactor_duo_ctx;
@@ -0,0 +1,8 @@
CREATE TABLE twofactor_duo_ctx (
    state      TEXT NOT NULL,
    user_email TEXT NOT NULL,
    nonce      TEXT NOT NULL,
    exp        INTEGER NOT NULL,

    PRIMARY KEY (state)
);
@@ -1 +0,0 @@
1.70.0
4
rust-toolchain.toml
Normal file
@@ -0,0 +1,4 @@
[toolchain]
channel = "1.80.1"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
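With the pin moved from the bare `rust-toolchain` file into `rust-toolchain.toml`, rustup resolves the toolchain automatically inside the repository; a minimal sketch:

# Illustrative: rustup reads rust-toolchain.toml from the working directory
rustup show active-toolchain   # expected to report 1.80.1, with rustfmt and clippy available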
109
src/api/admin.rs
@@ -1,4 +1,5 @@
 use once_cell::sync::Lazy;
+use reqwest::Method;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
 use std::env;
@@ -13,14 +14,18 @@ use rocket::{
 };

 use crate::{
-    api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
-    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
+    api::{
+        core::{log_event, two_factor},
+        unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
+    },
+    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp, Secure},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     error::{Error, MapResult},
+    http_client::make_http_request,
     mail,
     util::{
-        docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
+        container_base_image, format_naive_datetime_local, get_display_size, is_running_in_container, NumberOrString,
     },
     CONFIG, VERSION,
 };
@@ -164,7 +169,12 @@ struct LoginForm {
 }

 #[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
+fn post_admin_login(
+    data: Form<LoginForm>,
+    cookies: &CookieJar<'_>,
+    ip: ClientIp,
+    secure: Secure,
+) -> Result<Redirect, AdminResponse> {
     let data = data.into_inner();
     let redirect = data.redirect;

@@ -184,12 +194,12 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
     let claims = generate_admin_claims();
     let jwt = encode_jwt(&claims);

-    let cookie = Cookie::build(COOKIE_NAME, jwt)
+    let cookie = Cookie::build((COOKIE_NAME, jwt))
         .path(admin_path())
         .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
         .same_site(SameSite::Strict)
         .http_only(true)
-        .finish();
+        .secure(secure.https);

     cookies.add(cookie);
     if let Some(redirect) = redirect {
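Note: Rocket 0.5 reworked the cookie API: Cookie::build now takes a (name, value) tuple and the builder converts directly into a Cookie, so .finish() is gone; the new Secure request guard also lets the handler set the cookie's Secure flag only when the request actually arrived over HTTPS. A minimal sketch of the new builder, assuming rocket 0.5 (names here are illustrative, not the actual vaultwarden constants):

    use rocket::http::{Cookie, CookieJar, SameSite};

    fn set_admin_session(cookies: &CookieJar<'_>, jwt: String, https: bool) {
        // CookieBuilder implements Into<Cookie>, so no .finish() is required.
        let cookie = Cookie::build(("VW_ADMIN", jwt))
            .path("/admin")
            .same_site(SameSite::Strict)
            .http_only(true)
            .secure(https); // only mark Secure on HTTPS requests
        cookies.add(cookie);
    }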
@@ -262,8 +272,8 @@ fn admin_page_login() -> ApiResult<Html<String>> {
     render_admin_login(None, None)
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct InviteData {
     email: String,
 }
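Note: this derive swap is one instance of a theme running through the whole release: the custom JsonUpcase extractor is removed and request bodies become plain Json of structs tagged #[serde(rename_all = "camelCase")], matching the casing current Bitwarden clients send. A self-contained illustration, assuming only serde and serde_json:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct InviteData {
        email: String,
        wait_time_days: i32, // accepts "waitTimeDays" on the wire
    }

    fn main() {
        let data: InviteData =
            serde_json::from_str(r#"{"email":"a@b.c","waitTimeDays":7}"#).unwrap();
        assert_eq!(data.wait_time_days, 7);
    }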
@@ -279,12 +289,11 @@ async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
 #[post("/invite", data = "<data>")]
 async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
-    let email = data.email.clone();
     if User::find_by_mail(&data.email, &mut conn).await.is_some() {
         err_code!("User already exists", Status::Conflict.code)
     }

-    let mut user = User::new(email);
+    let mut user = User::new(data.email);

     async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
         if CONFIG.mail_enabled() {
@@ -314,7 +323,7 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {

 #[get("/logout")]
 fn logout(cookies: &CookieJar<'_>) -> Redirect {
-    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+    cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
     Redirect::to(admin_path())
 }

@@ -324,8 +333,12 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
     let mut users_json = Vec::with_capacity(users.len());
     for u in users {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["lastActive"] = match u.last_active(&mut conn).await {
+            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
+            None => json!(None::<String>),
+        };
         users_json.push(usr);
     }

@@ -340,7 +353,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
         let mut usr = u.to_json(&mut conn).await;
         usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
         usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
-        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
+        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
         usr["user_enabled"] = json!(u.enabled);
         usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         usr["last_active"] = match u.last_active(&mut conn).await {
@@ -358,8 +371,8 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
 async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     if let Some(u) = User::find_by_mail(mail, &mut conn).await {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         Ok(Json(usr))
     } else {
         err_code!("User doesn't exist", Status::NotFound.code);
@@ -370,8 +383,8 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn)
 async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let u = get_user_or_404(uuid, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
-    usr["UserEnabled"] = json!(u.enabled);
-    usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+    usr["userEnabled"] = json!(u.enabled);
+    usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
     Ok(Json(usr))
 }

@@ -388,7 +401,7 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
             EventType::OrganizationUserRemoved as i32,
             &user_org.uuid,
             &user_org.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,
@@ -407,7 +420,7 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif

     if CONFIG.push_enabled() {
         for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
-            match unregister_push_device(device.uuid).await {
+            match unregister_push_device(device.push_uuid).await {
                 Ok(r) => r,
                 Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
             };
@@ -443,9 +456,10 @@ async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyR
 }

 #[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     let mut user = get_user_or_404(uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }
@@ -468,7 +482,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
     }
 }

-#[derive(Deserialize, Debug)]
+#[derive(Debug, Deserialize)]
 struct UserOrgTypeData {
     user_type: NumberOrString,
     user_uuid: String,
@@ -503,7 +517,11 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
     match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
         Ok(_) => {}
         Err(OrgPolicyErr::TwoFactorMissing) => {
-            err!("You cannot modify this user to this type because it has no two-step login method activated");
+            if CONFIG.email_2fa_auto_fallback() {
+                two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
+            } else {
+                err!("You cannot modify this user to this type because they have not setup 2FA");
+            }
         }
         Err(OrgPolicyErr::SingleOrgEnforced) => {
             err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
@@ -515,7 +533,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
         EventType::OrganizationUserUpdated as i32,
         &user_to_edit.uuid,
         &data.org_uuid,
-        String::from(ACTING_ADMIN_USER),
+        ACTING_ADMIN_USER,
         14, // Use UnknownBrowser type
         &token.ip.ip,
         &mut conn,
@@ -543,7 +561,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
         org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
-        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
+        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
         organizations_json.push(org);
     }

@@ -583,15 +601,15 @@ struct TimeApi {
 }

 async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    let json_api = get_reqwest_client();
-
-    Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
+    Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.json::<T>().await?)
 }

 async fn has_http_access() -> bool {
-    let http_access = get_reqwest_client();
-
-    match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
+    let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") {
+        Ok(r) => r,
+        Err(_) => return false,
+    };
+    match req.send().await {
         Ok(r) => r.status().is_success(),
         _ => false,
     }
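Note: the shared get_reqwest_client() is replaced by make_http_request, a fallible constructor that can reject a request before any I/O happens (hence the new early-return arm in has_http_access). A rough sketch of that shape, assuming reqwest; the real wrapper lives in src/http_client.rs and applies vaultwarden's own policy checks:

    use once_cell::sync::Lazy;
    use reqwest::{Client, Method, RequestBuilder};

    static CLIENT: Lazy<Client> = Lazy::new(Client::new);

    // Policy runs up front; callers get an Err instead of a doomed request.
    fn make_http_request(method: Method, url: &str) -> Result<RequestBuilder, String> {
        // Illustrative check only; the real implementation enforces different rules.
        if !url.starts_with("https://") {
            return Err(format!("refusing non-HTTPS url: {url}"));
        }
        Ok(CLIENT.request(method, url))
    }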
@@ -601,7 +619,7 @@ use cached::proc_macro::cached;
 /// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
 /// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
 #[cached(time = 300, sync_writes = true)]
-async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
+async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
     // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
     if has_http_access {
         (
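Note: #[cached] is the cached crate's memoization macro: results are cached per argument set, entries expire after time seconds, and sync_writes = true makes concurrent callers wait for one writer instead of all hitting the GitHub API at once. The same attribute on a trivial function, assuming the cached crate:

    use cached::proc_macro::cached;

    // At most one recomputation per distinct `repo` every 300 seconds.
    #[cached(time = 300, sync_writes = true)]
    fn latest_release_url(repo: String) -> String {
        format!("https://api.github.com/repos/{repo}/releases/latest")
    }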
@@ -618,9 +636,9 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
                 }
                 _ => "-".to_string(),
             },
-            // Do not fetch the web-vault version when running within Docker.
+            // Do not fetch the web-vault version when running within a container.
             // The web-vault version is embedded within the container it self, and should not be updated manually
-            if running_within_docker {
+            if running_within_container {
                 "-".to_string()
             } else {
                 match get_json_api::<GitRelease>(
@@ -674,7 +692,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     };

     // Execute some environment checks
-    let running_within_docker = is_running_in_docker();
+    let running_within_container = is_running_in_container();
     let has_http_access = has_http_access().await;
     let uses_proxy = env::var_os("HTTP_PROXY").is_some()
         || env::var_os("http_proxy").is_some()
@@ -688,12 +706,9 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     };

     let (latest_release, latest_commit, latest_web_build) =
-        get_release_info(has_http_access, running_within_docker).await;
+        get_release_info(has_http_access, running_within_container).await;

-    let ip_header_name = match &ip_header.0 {
-        Some(h) => h,
-        _ => "",
-    };
+    let ip_header_name = &ip_header.0.unwrap_or_default();

     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
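Note: the three-arm match over the optional header collapses into Option::unwrap_or_default, which yields an empty String when no header was configured; the is_some() probe further down then becomes an emptiness check. In isolation:

    let ip_header: Option<String> = None;
    let name = ip_header.unwrap_or_default(); // "" when absent
    assert!(name.is_empty());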
@@ -703,11 +718,11 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
         "web_vault_enabled": &CONFIG.web_vault_enabled(),
         "web_vault_version": web_vault_version.version.trim_start_matches('v'),
         "latest_web_build": latest_web_build,
-        "running_within_docker": running_within_docker,
-        "docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
+        "running_within_container": running_within_container,
+        "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
         "has_http_access": has_http_access,
-        "ip_header_exists": &ip_header.0.is_some(),
-        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_exists": !ip_header_name.is_empty(),
+        "ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
         "ip_header_name": ip_header_name,
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,
@@ -783,16 +798,16 @@ impl<'r> FromRequest<'r> for AdminToken {
                 if requested_page.is_empty() {
                     return Outcome::Forward(Status::Unauthorized);
                 } else {
-                    return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
+                    return Outcome::Error((Status::Unauthorized, "Unauthorized"));
                 }
             }
         };

         if decode_admin(access_token).is_err() {
             // Remove admin cookie
-            cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+            cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
             error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
-            return Outcome::Failure((Status::Unauthorized, "Session expired"));
+            return Outcome::Error((Status::Unauthorized, "Session expired"));
         }

         Outcome::Success(Self {
File diff suppressed because it is too large
src/api/core/emergency_access.rs
@@ -1,15 +1,17 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use rocket::{serde::json::Json, Route};
 use serde_json::Value;

 use crate::{
     api::{
         core::{CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString,
+        EmptyResult, JsonResult,
     },
     auth::{decode_emergency_access_invite, Headers},
     db::{models::*, DbConn, DbPool},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };

 pub fn routes() -> Vec<Route> {
@@ -18,6 +20,7 @@ pub fn routes() -> Vec<Route> {
         get_grantees,
         get_emergency_access,
         put_emergency_access,
+        post_emergency_access,
        delete_emergency_access,
         post_delete_emergency_access,
         send_invite,
@@ -37,45 +40,66 @@ pub fn routes() -> Vec<Route> {
 // region get

 #[get("/emergency-access/trusted")]
-async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    if !CONFIG.emergency_access_allowed() {
+        return Json(json!({
+            "data": [{
+                "id": "",
+                "status": 2,
+                "type": 0,
+                "waitTimeDays": 0,
+                "granteeId": "",
+                "email": "",
+                "name": "NOTE: Emergency Access is disabled!",
+                "object": "emergencyAccessGranteeDetails",
+
+            }],
+            "object": "list",
+            "continuationToken": null
+        }));
+    }
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
-        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+        if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+            emergency_access_list_json.push(grantee)
+        }
     }

-    Ok(Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
-    })))
+    Json(json!({
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
+    }))
 }

 #[get("/emergency-access/granted")]
-async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
-
-    let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
+async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    let emergency_access_list = if CONFIG.emergency_access_allowed() {
+        EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await
+    } else {
+        Vec::new()
+    };
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
         emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
     }

-    Ok(Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
-    })))
+    Json(json!({
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
+    }))
 }

 #[get("/emergency-access/<emer_id>")]
-async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+    check_emergency_access_enabled()?;

-    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+    match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+        Some(emergency_access) => Ok(Json(
+            emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+        )),
         None => err!("Emergency access not valid."),
     }
 }
@@ -85,42 +109,49 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
 // region put/post

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessUpdateData {
-    Type: NumberOrString,
-    WaitTimeDays: i32,
-    KeyEncrypted: Option<String>,
+    r#type: NumberOrString,
+    wait_time_days: i32,
+    key_encrypted: Option<String>,
 }

 #[put("/emergency-access/<emer_id>", data = "<data>")]
-async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
-    post_emergency_access(emer_id, data, conn).await
+async fn put_emergency_access(
+    emer_id: &str,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    post_emergency_access(emer_id, data, headers, conn).await
 }

 #[post("/emergency-access/<emer_id>", data = "<data>")]
 async fn post_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<EmergencyAccessUpdateData>,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

-    let data: EmergencyAccessUpdateData = data.into_inner().data;
+    let data: EmergencyAccessUpdateData = data.into_inner();

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => emergency_access,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emergency_access) => emergency_access,
+            None => err!("Emergency access not valid."),
+        };

-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };

     emergency_access.atype = new_type;
-    emergency_access.wait_time_days = data.WaitTimeDays;
-    if data.KeyEncrypted.is_some() {
-        emergency_access.key_encrypted = data.KeyEncrypted;
+    emergency_access.wait_time_days = data.wait_time_days;
+    if data.key_encrypted.is_some() {
+        emergency_access.key_encrypted = data.key_encrypted;
     }

     emergency_access.save(&mut conn).await?;
@@ -133,19 +164,23 @@ async fn post_emergency_access(

 #[delete("/emergency-access/<emer_id>")]
 async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

-    let grantor_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => {
-            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
-                err!("Emergency access not valid.")
-            }
-            emer
+    let emergency_access = match (
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+    ) {
+        (Some(grantor_emer), None) => {
+            info!("Grantor deleted emergency access {emer_id}");
+            grantor_emer
         }
-        None => err!("Emergency access not valid."),
+        (None, Some(grantee_emer)) => {
+            info!("Grantee deleted emergency access {emer_id}");
+            grantee_emer
+        }
+        _ => err!("Emergency access not valid."),
     };

     emergency_access.delete(&mut conn).await?;
     Ok(())
 }
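Note: deletion is now permitted from either side of the relationship, and the two scoped lookups are disambiguated by matching on a tuple of Options, so exactly one side can own the record. The pattern in isolation:

    fn who_may_delete(grantor: Option<&str>, grantee: Option<&str>) -> Result<&'static str, ()> {
        match (grantor, grantee) {
            (Some(_), None) => Ok("grantor"),
            (None, Some(_)) => Ok("grantee"),
            _ => Err(()), // neither found: reject
        }
    }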
@@ -160,24 +195,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
|
|||||||
// region invite
|
// region invite
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct EmergencyAccessInviteData {
|
struct EmergencyAccessInviteData {
|
||||||
Email: String,
|
email: String,
|
||||||
Type: NumberOrString,
|
r#type: NumberOrString,
|
||||||
WaitTimeDays: i32,
|
wait_time_days: i32,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/emergency-access/invite", data = "<data>")]
|
#[post("/emergency-access/invite", data = "<data>")]
|
||||||
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: EmergencyAccessInviteData = data.into_inner().data;
|
let data: EmergencyAccessInviteData = data.into_inner();
|
||||||
let email = data.Email.to_lowercase();
|
let email = data.email.to_lowercase();
|
||||||
let wait_time_days = data.WaitTimeDays;
|
let wait_time_days = data.wait_time_days;
|
||||||
|
|
||||||
let emergency_access_status = EmergencyAccessStatus::Invited as i32;
|
let emergency_access_status = EmergencyAccessStatus::Invited as i32;
|
||||||
|
|
||||||
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
|
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
|
||||||
Some(new_type) => new_type as i32,
|
Some(new_type) => new_type as i32,
|
||||||
None => err!("Invalid emergency access type."),
|
None => err!("Invalid emergency access type."),
|
||||||
};
|
};
|
||||||
@@ -189,7 +224,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
|||||||
err!("You can not set yourself as an emergency contact.")
|
err!("You can not set yourself as an emergency contact.")
|
||||||
}
|
}
|
||||||
|
|
||||||
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
|
let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
|
||||||
None => {
|
None => {
|
||||||
if !CONFIG.invitations_allowed() {
|
if !CONFIG.invitations_allowed() {
|
||||||
err!(format!("Grantee user does not exist: {}", &email))
|
err!(format!("Grantee user does not exist: {}", &email))
|
||||||
@@ -206,9 +241,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
|||||||
|
|
||||||
let mut user = User::new(email.clone());
|
let mut user = User::new(email.clone());
|
||||||
user.save(&mut conn).await?;
|
user.save(&mut conn).await?;
|
||||||
user
|
(user, true)
|
||||||
}
|
}
|
||||||
Some(user) => user,
|
Some(user) if user.password_hash.is_empty() => (user, true),
|
||||||
|
Some(user) => (user, false),
|
||||||
};
|
};
|
||||||
|
|
||||||
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
|
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
|
||||||
@@ -236,15 +272,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
|||||||
&grantor_user.email,
|
&grantor_user.email,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
} else {
|
} else if !new_user {
|
||||||
// Automatically mark user as accepted if no email invites
|
// if mail is not enabled immediately accept the invitation for existing users
|
||||||
match User::find_by_mail(&email, &mut conn).await {
|
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
|
||||||
Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(e) => err!(e.to_string()),
|
|
||||||
},
|
|
||||||
None => err!("Grantee user not found."),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -252,16 +282,13 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
|||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/reinvite")]
|
#[post("/emergency-access/<emer_id>/reinvite")]
|
||||||
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
if emergency_access.grantor_uuid != headers.user.uuid {
|
|
||||||
err!("Emergency access not valid.");
|
|
||||||
}
|
|
||||||
|
|
||||||
if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
|
if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
|
||||||
err!("The grantee user is already accepted or confirmed to the organization");
|
err!("The grantee user is already accepted or confirmed to the organization");
|
||||||
@@ -288,38 +315,33 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
|
|||||||
&grantor_user.email,
|
&grantor_user.email,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
} else {
|
} else if !grantee_user.password_hash.is_empty() {
|
||||||
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
|
// accept the invitation for existing user
|
||||||
let invitation = Invitation::new(&email);
|
emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
|
||||||
invitation.save(&mut conn).await?;
|
} else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
|
||||||
}
|
let invitation = Invitation::new(&email);
|
||||||
|
invitation.save(&mut conn).await?;
|
||||||
// Automatically mark user as accepted if no email invites
|
|
||||||
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(e) => err!(e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct AcceptData {
|
struct AcceptData {
|
||||||
Token: String,
|
token: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
|
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
|
||||||
async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: AcceptData = data.into_inner().data;
|
let data: AcceptData = data.into_inner();
|
||||||
let token = &data.Token;
|
let token = &data.token;
|
||||||
let claims = decode_emergency_access_invite(token)?;
|
let claims = decode_emergency_access_invite(token)?;
|
||||||
|
|
||||||
// This can happen if the user who received the invite used a different email to signup.
|
// This can happen if the user who received the invite used a different email to signup.
|
||||||
// Since we do not know if this is intented, we error out here and do nothing with the invite.
|
// Since we do not know if this is intended, we error out here and do nothing with the invite.
|
||||||
if claims.email != headers.user.email {
|
if claims.email != headers.user.email {
|
||||||
err!("Claim email does not match current users email")
|
err!("Claim email does not match current users email")
|
||||||
}
|
}
|
||||||
@@ -332,10 +354,13 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
|||||||
None => err!("Invited user not found"),
|
None => err!("Invited user not found"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
// We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
|
||||||
Some(emer) => emer,
|
// The uuid of the grantee gets stored once accepted.
|
||||||
None => err!("Emergency access not valid."),
|
let mut emergency_access =
|
||||||
};
|
match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await {
|
||||||
|
Some(emer) => emer,
|
||||||
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
// get grantor user to send Accepted email
|
// get grantor user to send Accepted email
|
||||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||||
@@ -347,10 +372,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
|||||||
&& grantor_user.name == claims.grantor_name
|
&& grantor_user.name == claims.grantor_name
|
||||||
&& grantor_user.email == claims.grantor_email
|
&& grantor_user.email == claims.grantor_email
|
||||||
{
|
{
|
||||||
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
|
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
|
||||||
Ok(v) => v,
|
|
||||||
Err(e) => err!(e.to_string()),
|
|
||||||
}
|
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
|
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
|
||||||
@@ -362,49 +384,30 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn accept_invite_process(
|
|
||||||
grantee_uuid: &str,
|
|
||||||
emergency_access: &mut EmergencyAccess,
|
|
||||||
grantee_email: &str,
|
|
||||||
conn: &mut DbConn,
|
|
||||||
) -> EmptyResult {
|
|
||||||
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
|
|
||||||
err!("User email does not match invite.");
|
|
||||||
}
|
|
||||||
|
|
||||||
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
|
|
||||||
err!("Emergency contact already accepted.");
|
|
||||||
}
|
|
||||||
|
|
||||||
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
|
|
||||||
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
|
|
||||||
emergency_access.email = None;
|
|
||||||
emergency_access.save(conn).await
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct ConfirmData {
|
struct ConfirmData {
|
||||||
Key: String,
|
key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
|
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
|
||||||
async fn confirm_emergency_access(
|
async fn confirm_emergency_access(
|
||||||
emer_id: &str,
|
emer_id: &str,
|
||||||
data: JsonUpcase<ConfirmData>,
|
data: Json<ConfirmData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let confirming_user = headers.user;
|
let confirming_user = headers.user;
|
||||||
let data: ConfirmData = data.into_inner().data;
|
let data: ConfirmData = data.into_inner();
|
||||||
let key = data.Key;
|
let key = data.key;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|
if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|
||||||
|| emergency_access.grantor_uuid != confirming_user.uuid
|
|| emergency_access.grantor_uuid != confirming_user.uuid
|
||||||
@@ -444,17 +447,16 @@ async fn confirm_emergency_access(
|
|||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/initiate")]
|
#[post("/emergency-access/<emer_id>/initiate")]
|
||||||
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let initiating_user = headers.user;
|
let initiating_user = headers.user;
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
|
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
|
||||||
|| emergency_access.grantee_uuid != Some(initiating_user.uuid)
|
|
||||||
{
|
|
||||||
err!("Emergency access not valid.")
|
err!("Emergency access not valid.")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -484,16 +486,15 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
|||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/approve")]
|
#[post("/emergency-access/<emer_id>/approve")]
|
||||||
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
|
||||||
|| emergency_access.grantor_uuid != headers.user.uuid
|
|
||||||
{
|
|
||||||
err!("Emergency access not valid.")
|
err!("Emergency access not valid.")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -522,25 +523,20 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
|
|||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/reject")]
|
#[post("/emergency-access/<emer_id>/reject")]
|
||||||
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||||
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
|
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
|
||||||
|| emergency_access.grantor_uuid != headers.user.uuid
|
|
||||||
{
|
{
|
||||||
err!("Emergency access not valid.")
|
err!("Emergency access not valid.")
|
||||||
}
|
}
|
||||||
|
|
||||||
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
|
|
||||||
Some(user) => user,
|
|
||||||
None => err!("Grantor user not found."),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||||
Some(user) => user,
|
Some(user) => user,
|
||||||
@@ -551,7 +547,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
|||||||
emergency_access.save(&mut conn).await?;
|
emergency_access.save(&mut conn).await?;
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
|
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?;
|
||||||
}
|
}
|
||||||
Ok(Json(emergency_access.to_json()))
|
Ok(Json(emergency_access.to_json()))
|
||||||
} else {
|
} else {
|
||||||
@@ -565,12 +561,13 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
|||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/view")]
|
#[post("/emergency-access/<emer_id>/view")]
|
||||||
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
|
if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
|
||||||
err!("Emergency access not valid.")
|
err!("Emergency access not valid.")
|
||||||
@@ -594,21 +591,22 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
|
|||||||
}
|
}
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
"Ciphers": ciphers_json,
|
"ciphers": ciphers_json,
|
||||||
"KeyEncrypted": &emergency_access.key_encrypted,
|
"keyEncrypted": &emergency_access.key_encrypted,
|
||||||
"Object": "emergencyAccessView",
|
"object": "emergencyAccessView",
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/takeover")]
|
#[post("/emergency-access/<emer_id>/takeover")]
|
||||||
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let requesting_user = headers.user;
|
let requesting_user = headers.user;
|
||||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let emergency_access =
|
||||||
Some(emer) => emer,
|
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||||
None => err!("Emergency access not valid."),
|
Some(emer) => emer,
|
||||||
};
|
None => err!("Emergency access not valid."),
|
||||||
|
};
|
||||||
|
|
||||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||||
err!("Emergency access not valid.")
|
err!("Emergency access not valid.")
|
||||||
@@ -620,42 +618,43 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
|||||||
};
|
};
|
||||||
|
|
||||||
let result = json!({
|
let result = json!({
|
||||||
"Kdf": grantor_user.client_kdf_type,
|
"kdf": grantor_user.client_kdf_type,
|
||||||
"KdfIterations": grantor_user.client_kdf_iter,
|
"kdfIterations": grantor_user.client_kdf_iter,
|
||||||
"KdfMemory": grantor_user.client_kdf_memory,
|
"kdfMemory": grantor_user.client_kdf_memory,
|
||||||
"KdfParallelism": grantor_user.client_kdf_parallelism,
|
"kdfParallelism": grantor_user.client_kdf_parallelism,
|
||||||
"KeyEncrypted": &emergency_access.key_encrypted,
|
"keyEncrypted": &emergency_access.key_encrypted,
|
||||||
"Object": "emergencyAccessTakeover",
|
"object": "emergencyAccessTakeover",
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok(Json(result))
|
Ok(Json(result))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct EmergencyAccessPasswordData {
|
struct EmergencyAccessPasswordData {
|
||||||
NewMasterPasswordHash: String,
|
new_master_password_hash: String,
|
||||||
Key: String,
|
key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
|
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
|
||||||
async fn password_emergency_access(
|
async fn password_emergency_access(
|
||||||
emer_id: &str,
|
emer_id: &str,
|
||||||
data: JsonUpcase<EmergencyAccessPasswordData>,
|
data: Json<EmergencyAccessPasswordData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: EmergencyAccessPasswordData = data.into_inner().data;
|
let data: EmergencyAccessPasswordData = data.into_inner();
|
||||||
let new_master_password_hash = &data.NewMasterPasswordHash;
|
let new_master_password_hash = &data.new_master_password_hash;
|
||||||
//let key = &data.Key;
|
```diff
-    //let key = &data.Key;
 
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
@@ -667,7 +666,7 @@ async fn password_emergency_access(
     };
 
     // change grantor_user password
-    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
+    grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
     grantor_user.save(&mut conn).await?;
 
     // Disable TwoFactor providers since they will otherwise block logins
@@ -687,10 +686,11 @@ async fn password_emergency_access(
 #[get("/emergency-access/<emer_id>/policies")]
 async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
@@ -705,9 +705,9 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
 
     Ok(Json(json!({
-        "Data": policies_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": policies_json,
+        "object": "list",
+        "continuationToken": null
     })))
 }
 
@@ -722,9 +722,9 @@ fn is_valid_request(
         && emergency_access.atype == requested_access_type as i32
 }
 
-fn check_emergency_access_allowed() -> EmptyResult {
+fn check_emergency_access_enabled() -> EmptyResult {
     if !CONFIG.emergency_access_allowed() {
-        err!("Emergency access is not allowed.")
+        err!("Emergency access is not enabled.")
     }
     Ok(())
 }
@@ -746,7 +746,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
     for mut emer in emergency_access_list {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         let recovery_allowed_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
         if recovery_allowed_at.le(&now) {
             // Only update the access status
             // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
@@ -802,10 +802,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         // Calculate the day before the recovery will become active
         let final_recovery_reminder_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
         // Calculate if a day has passed since the previous notification, else no notification has been sent before
         let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
-            last_notification_at + Duration::days(1)
+            last_notification_at + TimeDelta::try_days(1).unwrap()
         } else {
            now
        };
```
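A note on the `Duration::days` to `TimeDelta::try_days` change in the two scheduler hunks above: chrono 0.4.35 renamed `Duration` to `TimeDelta` and steered callers toward the fallible `try_*` constructors, which return `None` on overflow instead of panicking. Below is a minimal standalone sketch of the new pattern; the variable values are illustrative stand-ins, not taken from the Vaultwarden models.

```rust
use chrono::{TimeDelta, Utc};

fn main() {
    // Illustrative stand-ins for emer.recovery_initiated_at / emer.wait_time_days.
    let recovery_initiated_at = Utc::now().naive_utc();
    let wait_time_days: i32 = 7;

    // TimeDelta::try_days returns Option<TimeDelta>; it is None only when
    // the day count overflows the representable range, so unwrap() is fine
    // for small, validated values like a wait period.
    let wait = TimeDelta::try_days(i64::from(wait_time_days)).unwrap();
    let recovery_allowed_at = recovery_initiated_at + wait;

    if recovery_allowed_at <= Utc::now().naive_utc() {
        println!("recovery window is open");
    } else {
        println!("recovery opens at {recovery_allowed_at}");
    }
}
```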
```diff
diff --git a/src/api/core/events.rs b/src/api/core/events.rs
@@ -5,7 +5,7 @@ use rocket::{form::FromForm, serde::json::Json, Route};
 use serde_json::Value;
 
 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcaseVec},
+    api::{EmptyResult, JsonResult},
     auth::{AdminHeaders, Headers},
     db::{
         models::{Cipher, Event, UserOrganization},
@@ -22,7 +22,6 @@ pub fn routes() -> Vec<Route> {
 }
 
 #[derive(FromForm)]
-#[allow(non_snake_case)]
 struct EventRange {
     start: String,
     end: String,
@@ -53,9 +52,9 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
     };
 
     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
 
@@ -85,9 +84,9 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
     };
 
     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
 
@@ -119,13 +118,13 @@ async fn get_user_events(
     };
 
     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
 
-fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
+fn get_continuation_token(events_json: &[Value]) -> Option<&str> {
     // When the length of the vec equals the max page_size there probably is more data
     // When it is less, then all events are loaded.
     if events_json.len() as i64 == Event::PAGE_SIZE {
@@ -145,33 +144,33 @@ pub fn main_routes() -> Vec<Route> {
     routes![post_events_collect,]
 }
 
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EventCollection {
     // Mandatory
-    Type: i32,
-    Date: String,
+    r#type: i32,
+    date: String,
 
     // Optional
-    CipherId: Option<String>,
-    OrganizationId: Option<String>,
+    cipher_id: Option<String>,
+    organization_id: Option<String>,
 }
 
 // Upstream:
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
 #[post("/collect", format = "application/json", data = "<data>")]
-async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.org_events_enabled() {
         return Ok(());
     }
 
-    for event in data.iter().map(|d| &d.data) {
-        let event_date = parse_date(&event.Date);
-        match event.Type {
+    for event in data.iter() {
+        let event_date = parse_date(&event.date);
+        match event.r#type {
             1000..=1099 => {
                 _log_user_event(
-                    event.Type,
+                    event.r#type,
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),
@@ -181,9 +180,9 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
                 .await;
             }
             1600..=1699 => {
-                if let Some(org_uuid) = &event.OrganizationId {
+                if let Some(org_uuid) = &event.organization_id {
                     _log_event(
-                        event.Type,
+                        event.r#type,
                         org_uuid,
                         org_uuid,
                         &headers.user.uuid,
@@ -196,11 +195,11 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
                 }
             }
             _ => {
-                if let Some(cipher_uuid) = &event.CipherId {
+                if let Some(cipher_uuid) = &event.cipher_id {
                     if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
                         if let Some(org_uuid) = cipher.organization_uuid {
                             _log_event(
-                                event.Type,
+                                event.r#type,
                                 cipher_uuid,
                                 &org_uuid,
                                 &headers.user.uuid,
@@ -263,7 +262,7 @@ pub async fn log_event(
     event_type: i32,
     source_uuid: &str,
     org_uuid: &str,
-    act_user_uuid: String,
+    act_user_uuid: &str,
     device_type: i32,
     ip: &IpAddr,
     conn: &mut DbConn,
@@ -271,7 +270,7 @@ pub async fn log_event(
     if !CONFIG.org_events_enabled() {
         return;
     }
-    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
+    _log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
 }
 
 #[allow(clippy::too_many_arguments)]
@@ -289,7 +288,7 @@ async fn _log_event(
     let mut event = Event::new(event_type, event_date);
     match event_type {
         // 1000..=1099 Are user events, they need to be logged via log_user_event()
-        // Collection Events
+        // Cipher Events
         1100..=1199 => {
             event.cipher_uuid = Some(String::from(source_uuid));
         }
```
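The `EventCollection` rework above replaces the custom `JsonUpcase` wrapper with plain serde: `#[serde(rename_all = "camelCase")]` maps the Rust field names to the clients' JSON keys, and because `type` is a Rust keyword the field is spelled with the raw identifier `r#type` while still binding to the JSON key `type`. A self-contained sketch of how such a payload deserializes (the sample JSON values are illustrative):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EventCollection {
    // `type` is a keyword, so the raw identifier r#type is used;
    // rename_all still maps it to the JSON key "type".
    r#type: i32,
    date: String,
    // Option fields default to None when the key is absent.
    cipher_id: Option<String>,
    organization_id: Option<String>,
}

fn main() {
    let payload = r#"[
        {"type": 1000, "date": "2024-02-01T00:00:00Z"},
        {"type": 1600, "date": "2024-02-01T00:00:00Z",
         "organizationId": "some-org-uuid"}
    ]"#;

    let events: Vec<EventCollection> = serde_json::from_str(payload).unwrap();
    for event in &events {
        println!("{} at {} (org: {:?})", event.r#type, event.date, event.organization_id);
    }
}
```

The `get_continuation_token` signature change from `&Vec<Value>` to `&[Value]` is the usual clippy `ptr_arg` fix: borrowing a slice accepts both vectors and arrays without changing any call site.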
```diff
diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs
@@ -2,7 +2,7 @@ use rocket::serde::json::Json;
 use serde_json::Value;
 
 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{EmptyResult, JsonResult, Notify, UpdateType},
     auth::Headers,
     db::{models::*, DbConn},
 };
@@ -17,9 +17,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
 
     Json(json!({
-        "Data": folders_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": folders_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }
 
@@ -38,16 +38,17 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
 }
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct FolderData {
-    pub Name: String,
+    pub name: String,
+    pub id: Option<String>,
 }
 
 #[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    let data: FolderData = data.into_inner();
 
-    let mut folder = Folder::new(headers.user.uuid, data.Name);
+    let mut folder = Folder::new(headers.user.uuid, data.name);
 
     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
@@ -56,25 +57,19 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
 }
 
 #[post("/folders/<uuid>", data = "<data>")]
-async fn post_folder(
-    uuid: &str,
-    data: JsonUpcase<FolderData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     put_folder(uuid, data, headers, conn, nt).await
 }
 
 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
     uuid: &str,
-    data: JsonUpcase<FolderData>,
+    data: Json<FolderData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+    let data: FolderData = data.into_inner();
 
     let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
@@ -85,7 +80,7 @@ async fn put_folder(
         err!("Folder belongs to another user")
     }
 
-    folder.name = data.Name;
+    folder.name = data.name;
 
     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
```
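With `JsonUpcase` gone, the folder handlers take Rocket's stock `Json<FolderData>` guard and call `into_inner()` directly instead of reaching through the wrapper's `.data` field. A minimal sketch of the same shape outside Vaultwarden, assuming Rocket 0.5 with the `json` feature enabled; the route body and response are illustrative:

```rust
use rocket::serde::{json::Json, Deserialize};

#[derive(Deserialize)]
#[serde(crate = "rocket::serde", rename_all = "camelCase")]
pub struct FolderData {
    pub name: String,
    pub id: Option<String>,
}

// Accepts e.g. {"name": "Work", "id": null}; serde's rename_all does the
// key mapping that the JsonUpcase wrapper used to perform by re-casing.
#[rocket::post("/folders", data = "<data>")]
fn post_folders(data: Json<FolderData>) -> String {
    let data = data.into_inner(); // no more `.data` indirection
    format!("created folder {}", data.name)
}

#[rocket::launch]
fn rocket() -> _ {
    rocket::build().mount("/", rocket::routes![post_folders])
}
```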
```diff
diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs
@@ -8,11 +8,12 @@ mod public;
 mod sends;
 pub mod two_factor;
 
+pub use accounts::purge_auth_requests;
 pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
+use reqwest::Method;
 pub use sends::purge_sends;
-pub use two_factor::send_incomplete_2fa_notifications;
 
 pub fn routes() -> Vec<Route> {
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
@@ -46,23 +47,23 @@ pub fn events_routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
-use rocket::{serde::json::Json, Catcher, Route};
-use serde_json::Value;
+use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};
 
 use crate::{
-    api::{JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{JsonResult, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
-    util::get_reqwest_client,
+    http_client::make_http_request,
+    util::parse_experimental_client_feature_flags,
 };
 
-#[derive(Serialize, Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct GlobalDomain {
-    Type: i32,
-    Domains: Vec<String>,
-    Excluded: bool,
+    r#type: i32,
+    domains: Vec<String>,
+    excluded: bool,
 }
 
 const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
@@ -82,38 +83,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
     let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();
 
     for global in &mut globals {
-        global.Excluded = excluded_globals.contains(&global.Type);
+        global.excluded = excluded_globals.contains(&global.r#type);
     }
 
     if no_excluded {
-        globals.retain(|g| !g.Excluded);
+        globals.retain(|g| !g.excluded);
     }
 
     Json(json!({
-        "EquivalentDomains": equivalent_domains,
-        "GlobalEquivalentDomains": globals,
-        "Object": "domains",
+        "equivalentDomains": equivalent_domains,
+        "globalEquivalentDomains": globals,
+        "object": "domains",
     }))
 }
 
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EquivDomainData {
-    ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
-    EquivalentDomains: Option<Vec<Vec<String>>>,
+    excluded_global_equivalent_domains: Option<Vec<i32>>,
+    equivalent_domains: Option<Vec<Vec<String>>>,
 }
 
 #[post("/settings/domains", data = "<data>")]
 async fn post_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
+    data: Json<EquivDomainData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: EquivDomainData = data.into_inner().data;
+    let data: EquivDomainData = data.into_inner();
 
-    let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
-    let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
+    let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
+    let equivalent_domains = data.equivalent_domains.unwrap_or_default();
 
     let mut user = headers.user;
     use serde_json::to_string;
@@ -129,12 +130,7 @@ async fn post_eq_domains(
 }
 
 #[put("/settings/domains", data = "<data>")]
-async fn put_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     post_eq_domains(data, headers, conn, nt).await
 }
 
@@ -145,9 +141,7 @@ async fn hibp_breach(username: &str) -> JsonResult {
     );
 
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = get_reqwest_client();
-
-        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
+        let res = make_http_request(Method::GET, &url)?.header("hibp-api-key", api_key).send().await?;
 
         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {
@@ -158,15 +152,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
             Ok(Json(value))
         } else {
             Ok(Json(json!([{
-                "Name": "HaveIBeenPwned",
-                "Title": "Manual HIBP Check",
-                "Domain": "haveibeenpwned.com",
-                "BreachDate": "2019-08-18T00:00:00Z",
-                "AddedDate": "2019-08-18T00:00:00Z",
-                "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
-                "LogoPath": "vw_static/hibp.png",
-                "PwnCount": 0,
-                "DataClasses": [
+                "name": "HaveIBeenPwned",
+                "title": "Manual HIBP Check",
+                "domain": "haveibeenpwned.com",
+                "breachDate": "2019-08-18T00:00:00Z",
+                "addedDate": "2019-08-18T00:00:00Z",
+                "description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
+                "logoPath": "vw_static/hibp.png",
+                "pwnCount": 0,
+                "dataClasses": [
                     "Error - No API key set!"
                 ]
             }])))
@@ -192,12 +186,24 @@ fn version() -> Json<&'static str> {
 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
+    let mut feature_states =
+        parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+    // Force the new key rotation feature
+    feature_states.insert("key-rotation-improvements".to_string(), true);
+    feature_states.insert("flexible-collections-v-1".to_string(), false);
+
     Json(json!({
-        "version": crate::VERSION,
+        // Note: The clients use this version to handle backwards compatibility concerns
+        // This means they expect a version that closely matches the Bitwarden server version
+        // We should make sure that we keep this updated when we support the new server features
+        // Version history:
+        // - Individual cipher key encryption: 2023.9.1
+        "version": "2024.2.0",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
-            "url": "https://github.com/dani-garcia/vaultwarden"
+            "url": "https://github.com/dani-garcia/vaultwarden",
+            "version": crate::VERSION
         },
         "environment": {
             "vault": domain,
@@ -206,6 +212,7 @@ fn config() -> Json<Value> {
             "notifications": format!("{domain}/notifications"),
             "sso": "",
         },
+        "featureStates": feature_states,
         "object": "config",
     }))
 }
```
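The new `featureStates` block in `config()` is built from the comma-separated `EXPERIMENTAL_CLIENT_FEATURE_FLAGS` setting and then selectively overridden before serializing. The sketch below is an illustrative stand-in for that shape, not the actual `util::parse_experimental_client_feature_flags` implementation: every listed flag is switched on, and individual entries can then be forced on or off, mirroring the overrides in the diff.

```rust
use std::collections::HashMap;

// Illustrative stand-in: turn "flag-a,flag-b" into {"flag-a": true, ...}.
fn parse_feature_flags(raw: &str) -> HashMap<String, bool> {
    raw.split(',')
        .map(str::trim)
        .filter(|f| !f.is_empty())
        .map(|f| (f.to_string(), true))
        .collect()
}

fn main() {
    // The flag names here are examples, not a list from the config.
    let mut feature_states = parse_feature_flags("fido2-vault-credentials,autofill-v2");

    // Mirror the overrides from the diff: force key rotation on,
    // keep flexible collections off.
    feature_states.insert("key-rotation-improvements".to_string(), true);
    feature_states.insert("flexible-collections-v-1".to_string(), false);

    println!("{feature_states:?}");
}
```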
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff