Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 10:45:57 +03:00)
Compare commits — 199 commits (SHA1s in page order):

4438da39f9 0b2383ab56 ad1d65bdf8 3b283c289e 4b9384cb2b 0f39d96518 edf7484a70 8b66e34415
1d00e34bbb 1b801406d6 5e46a43306 5c77431c2d 2775c6ce8a 890e668071 596c167312 ae3a153bdb
2c36993792 d672ad3f76 a641b48884 98b2178c7d 76a3f0f531 c5665e7b77 cbdcf8ef9f 3337594d60
2daa8be1f1 eccb3ab947 3246251f29 8ab200224e 34e00e1478 0fdda3bc2f 48836501bf f863ffb89a
03c6ed2e07 efc6eb0073 cec1e87679 512b3b9b7c 93da5091e6 915496c103 ecb31c85d6 d722328f05
cb4b683dcd 6eaf131922 8933ac2ee7 6822e445bb 18fbc1ccf6 4861f6decc b435ee49ad 193f86e43e
66a7baa67c 18d66474e0 ff8db4fd78 b2f9af718e 198fd2fc1d ec8a9c82df ef5e0bd4e5 30b408eaa9
e205e3b7db ca1a9e26d8 f3a1385aee 008a2cf298 f0c9a7fbc3 9162b13123 480bf9b0c1 f96c5e8a1e
3d4be24902 bf41d74501 01e33a4919 bc26bfa589 ccc51e7580 99a59bc4f3 a77482575a bbd630f1ee
d18b793c71 d3a1d875d5 d6e0ace192 60cbfa59bf 5ab7010c37 ad2cfd8b97 32543c46da 66bff73ebf
83d5432cbf f579a4154c f5a19c5f8b aa9bc1f785 f162e85e44 33ef70c192 3d2df6ce11 6cdcb3b297
d1af468700 ae1c53f4e5 bc57c4b193 61ae4c9cf5 8d7b3db33d e9ec3741ae dacd50f3f1 9412112639
aaeae16983 d892880dd2 4395e8e888 3dbfc484a5 4ec2507073 ab65d7989b 8707728cdb 631d022e17
211f4492fa 61f9081827 a8e5384c4a 1c7338c7c4 08f37b9935 4826ddca4c 2b32b6f78c a6cfdddfd8
814ce9a6ac 1bee46f64b 556d945396 664b480c71 84e901b7d2 839b2bc950 6050c8dac5 0a6b797e6e
fb6f441a4f 9876aedd67 19e671ff25 60964c07e6 e4894524e4 e7f083dee9 1074315a87 c56bf38079
3c0cac623d 550794b127 e818a0bf37 2aedff50e8 84a23008f4 44e9e1a58e e4606431d1 5b7d7390b0
a05187c0ff 8e34495e73 4219249e11 bd883de70e 2d66292350 adf67a8ee8 f40f5b8399 2d6ca0ea95
06a10e2c5a 445680fb84 83376544d8 04a17dcdef 0851561392 95cd6deda6 636f16dc66 9e5b049dca
23aa9088f3 4f0ed06b06 349c97efaf 8b05a5d192 83bf77d713 4d5c047ddc 147c9c7b50 6515a2fcad
4a2ed553df ba492c0602 1ec049e2b5 0fb8563b13 f906f6230a 951ba55123 18abf226be 393645617e
5bf243b675 cfba8347a3 55c1b6e8d5 3d7e80a7aa 5866338de4 271e3ae757 48cc31a59f 6a7cee4e7e
f850dbb310 07099df41a 0c0a80720e ae437f70a3 3d11f4cd16 3bd4e42fb0 89e94b1d91 0b28ab3be1
c5bcc340fa bff54fbfdb 867c6ba056 d1ecf03f44 fc43608eec 15dd05c78d aa6f774f65 379f885354
39a5f2dbe8 0daaa9b175 0c085d21ce dcaaa430f0 2cda54ceff 525e6bb65a 62cebebd3d

Changed file: .env.template (488 diff lines)
@@ -10,22 +10,63 @@
 ## variable ENV_FILE can be set to the location of this file prior to starting
 ## Vaultwarden.
 
+####################
+### Data folders ###
+####################
+
 ## Main data folder
 # DATA_FOLDER=data
 
+## Individual folders, these override %DATA_FOLDER%
+# RSA_KEY_FILENAME=data/rsa_key
+# ICON_CACHE_FOLDER=data/icon_cache
+# ATTACHMENTS_FOLDER=data/attachments
+# SENDS_FOLDER=data/sends
+# TMP_FOLDER=data/tmp
+
+## Templates data folder, by default uses embedded templates
+## Check source code to see the format
+# TEMPLATES_FOLDER=data/templates
+## Automatically reload the templates for every request, slow, use only for development
+# RELOAD_TEMPLATES=false
+
+## Web vault settings
+# WEB_VAULT_FOLDER=web-vault/
+# WEB_VAULT_ENABLED=true
+
+#########################
+### Database settings ###
+#########################
+
 ## Database URL
 ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
 # DATABASE_URL=data/db.sqlite3
 ## When using MySQL, specify an appropriate connection URI.
-## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
+## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
 # DATABASE_URL=mysql://user:password@host[:port]/database_name
 ## When using PostgreSQL, specify an appropriate connection URI (recommended)
 ## or keyword/value connection string.
 ## Details:
-## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
+## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
 ## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
 # DATABASE_URL=postgresql://user:password@host[:port]/database_name
 
+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents Vaultwarden from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
+
+## Database timeout
+## Timeout when acquiring database connection
+# DATABASE_TIMEOUT=30
+
 ## Database max connections
 ## Define the size of the connection pool used for connecting to the database.
 # DATABASE_MAX_CONNS=10
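The ENABLE_DB_WAL notes above point out that turning the setting off is not enough once the SQLite database itself is already in WAL mode. A minimal sketch of checking and reverting the journal mode, assuming the default SQLite file at data/db.sqlite3 and the stock sqlite3 CLI:

    sqlite3 data/db.sqlite3 'PRAGMA journal_mode;'         # prints "wal" when WAL is enabled
    sqlite3 data/db.sqlite3 'PRAGMA journal_mode=DELETE;'  # switch back to the rollback journal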
@@ -39,31 +80,9 @@
 ## - PostgreSQL: ""
 # DATABASE_CONN_INIT=""
 
-## Individual folders, these override %DATA_FOLDER%
-# RSA_KEY_FILENAME=data/rsa_key
-# ICON_CACHE_FOLDER=data/icon_cache
-# ATTACHMENTS_FOLDER=data/attachments
-# SENDS_FOLDER=data/sends
-# TMP_FOLDER=data/tmp
-
-## Templates data folder, by default uses embedded templates
-## Check source code to see the format
-# TEMPLATES_FOLDER=/path/to/templates
-## Automatically reload the templates for every request, slow, use only for development
-# RELOAD_TEMPLATES=false
-
-## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
-## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Real-IP
-
-## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
-# ICON_CACHE_TTL=2592000
-## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
-# ICON_CACHE_NEGTTL=259200
-
-## Web vault settings
-# WEB_VAULT_FOLDER=web-vault/
-# WEB_VAULT_ENABLED=true
+#################
+### WebSocket ###
+#################
 
 ## Enables websocket notifications
 # WEBSOCKET_ENABLED=false
@@ -72,30 +91,24 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012
 
-## Controls whether users are allowed to create Bitwarden Sends.
-## This setting applies globally to all users.
-## To control this on a per-org basis instead, use the "Disable Send" org policy.
-# SENDS_ALLOWED=true
-
-## Controls whether users can enable emergency access to their accounts.
-## This setting applies globally to all users.
-# EMERGENCY_ACCESS_ALLOWED=true
+##########################
+### Push notifications ###
+##########################
+
+## Enables push notifications (requires key and id from https://bitwarden.com/host)
+## If you choose "European Union" Data Region, uncomment PUSH_RELAY_URI and PUSH_IDENTITY_URI then replace .com by .eu
+## Details about mobile client push notification:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
+# PUSH_ENABLED=false
+# PUSH_INSTALLATION_ID=CHANGEME
+# PUSH_INSTALLATION_KEY=CHANGEME
+## Don't change this unless you know what you're doing.
+# PUSH_RELAY_URI=https://push.bitwarden.com
+# PUSH_IDENTITY_URI=https://identity.bitwarden.com
 
-## Controls whether event logging is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
-# ORG_EVENTS_ENABLED=false
-
-## Number of days to retain events stored in the database.
-## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
-# EVENTS_DAYS_RETAIN=
-
-## BETA FEATURE: Groups
-## Controls whether group support is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default because this is a beta feature, it contains known issues!
-## KNOW WHAT YOU ARE DOING!
-# ORG_GROUPS_ENABLED=false
+#####################
+### Schedule jobs ###
+#####################
 
 ## Job scheduler settings
 ##
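For the "European Union" data region mentioned in the push-notification comments above, the two URIs would be uncommented with .com replaced by .eu; a minimal sketch of the resulting values:

    PUSH_RELAY_URI=https://push.bitwarden.eu
    PUSH_IDENTITY_URI=https://identity.bitwarden.eu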
@@ -136,60 +149,69 @@
 ## Cron schedule of the job that cleans old events from the event table.
 ## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
 # EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
-## Enable extended logging, which shows timestamps and targets in the logs
-# EXTENDED_LOGGING=true
-
-## Timestamp format used in extended logging.
-## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
-# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
-
-## Logging to file
-# LOG_FILE=/path/to/log
-
-## Logging to Syslog
-## This requires extended logging
-# USE_SYSLOG=false
-
-## Log level
-## Change the verbosity of the log output
-## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
-# LOG_LEVEL=Info
-
-## Enable WAL for the DB
-## Set to false to avoid enabling WAL during startup.
-## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents Vaultwarden from automatically enabling it on start.
-## Please read project wiki page about this setting first before changing the value as it can
-## cause performance degradation or might render the service unable to start.
-# ENABLE_DB_WAL=true
-
-## Database connection retries
-## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
-# DB_CONNECTION_RETRIES=15
-
-## Icon service
-## The predefined icon services are: internal, bitwarden, duckduckgo, google.
-## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
-## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=
 ##
-## `internal` refers to Vaultwarden's built-in icon fetching implementation.
-## If an external service is set, an icon request to Vaultwarden will return an HTTP
-## redirect to the corresponding icon at the external service. An external service may
-## be useful if your Vaultwarden instance has no external network connectivity, or if
-## you are concerned that someone may probe your instance to try to detect whether icons
-## for certain sites have been cached.
-# ICON_SERVICE=internal
+## Cron schedule of the job that cleans old auth requests from the auth request.
+## Defaults to every minute. Set blank to disable this job.
+# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
 
-## Icon redirect code
-## The HTTP status code to use for redirects to an external icon service.
-## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
-## Temporary redirects are useful while testing different icon services, but once a service
-## has been decided on, consider using permanent redirects for cacheability. The legacy codes
-## are currently better supported by the Bitwarden clients.
-# ICON_REDIRECT_CODE=302
+########################
+### General settings ###
+########################
 
+## Domain settings
+## The domain must match the address from where you access the server
+## It's recommended to configure this value, otherwise certain functionality might not work,
+## like attachment downloads, email links and U2F.
+## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
+## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
+## Details:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
+## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
+## For development
+# DOMAIN=http://localhost
+## For public server
+# DOMAIN=https://vw.domain.tld
+## For public server (URL with port number)
+# DOMAIN=https://vw.domain.tld:8443
+## For public server (URL with path)
+# DOMAIN=https://domain.tld/vw
+
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
+
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
+# USER_ATTACHMENT_LIMIT=
+## Per-user send storage limit (KB)
+## Max kilobytes of send storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further sends.
+# USER_SEND_LIMIT=
+
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3
+
 ## Disable icon downloading
 ## Set to true to disable icon downloading in the internal icon service.
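The schedules in this hunk use a six-field, seconds-first cron expression (seconds, minutes, hours, day of month, month, day of week) rather than the classic five-field form — an assumption consistent with the "daily" and "every minute" comments above. An annotated sketch of the two defaults:

    # EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
    #   second 0, minute 10, hour 0, any day/month/weekday  ->  daily at 00:10:00
    # AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
    #   second 30 of every minute                           ->  once per minute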
@@ -198,38 +220,6 @@
 ## will be deleted eventually, but won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false
 
-## Icon download timeout
-## Configure the timeout value when downloading the favicons.
-## The default is 10 seconds, but this could be to low on slower network connections
-# ICON_DOWNLOAD_TIMEOUT=10
-
-## Icon blacklist Regex
-## Any domains or IPs that match this regex won't be fetched by the icon service.
-## Useful to hide other servers in the local network. Check the WIKI for more details
-## NOTE: Always enclose this regex withing single quotes!
-# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
-
-## Any IP which is not defined as a global IP will be blacklisted.
-## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-# ICON_BLACKLIST_NON_GLOBAL_IPS=true
-
-## Disable 2FA remember
-## Enabling this would force the users to use a second factor to login every time.
-## Note that the checkbox would still be present, but ignored.
-# DISABLE_2FA_REMEMBER=false
-
-## Maximum attempts before an email token is reset and a new email will need to be sent.
-# EMAIL_ATTEMPTS_LIMIT=3
-
-## Token expiration time
-## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
-# EMAIL_EXPIRATION_TIME=600
-
-## Email token size
-## Number of digits in an email 2FA token (min: 6, max: 255).
-## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
-# EMAIL_TOKEN_SIZE=6
-
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true
 
@@ -251,6 +241,11 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
 
+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
 ## Controls which users can create new orgs.
 ## Blank or 'all' means all users can create orgs (this is the default):
 # ORG_CREATION_USERS=
@@ -259,19 +254,6 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
-## Token for the admin interface, preferably an Argon2 PCH string
-## Vaultwarden has a built-in generator by calling `vaultwarden hash`
-## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
-## If not set, the admin panel is disabled
-## New Argon2 PHC string
-# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'
-## Old plain text string (Will generate warnings in favor of Argon2)
-# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
-
-## Enable this to bypass the admin panel security. This option is only
-## meant to be used with the use of a separate auth layer in front
-# DISABLE_ADMIN_TOKEN=false
-
 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
 ## Name shown in the invitation emails that don't come from a specific organization
@@ -281,30 +263,17 @@
 ## email verification token and deletion request token will expire (must be at least 1)
 # INVITATION_EXPIRATION_HOURS=120
 
-## Per-organization attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per organization.
-## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
-# ORG_ATTACHMENT_LIMIT=
-## Per-user attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per user.
-## When this limit is reached, the user will not be allowed to upload further attachments.
-# USER_ATTACHMENT_LIMIT=
-
-## Number of days to wait before auto-deleting a trashed item.
-## If unset (the default), trashed items are not auto-deleted.
-## This setting applies globally, so make sure to inform all users of any changes to this setting.
-# TRASH_AUTO_DELETE_DAYS=
-
-## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
-## resulting in an email notification. An incomplete 2FA login is one where the correct
-## master password was provided but the required 2FA step was not completed, which
-## potentially indicates a master password compromise. Set to 0 to disable this check.
+## Controls whether users can enable emergency access to their accounts.
 ## This setting applies globally to all users.
-# INCOMPLETE_2FA_TIME_LIMIT=3
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
 
 ## Number of server-side passwords hashing iterations for the password hash.
 ## The default for new users. If changed, it will be updated during login for existing users.
-# PASSWORD_ITERATIONS=350000
+# PASSWORD_ITERATIONS=600000
 
 ## Controls whether users can set password hints. This setting applies globally to all users.
 # PASSWORD_HINTS_ALLOWED=true
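The bump from 350,000 to 600,000 iterations appears to track the PBKDF2 default the upstream Bitwarden clients adopted in 2023, in line with current OWASP guidance for PBKDF2-HMAC-SHA256.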
@@ -314,12 +283,115 @@
 ## as this provides unauthenticated access to potentially sensitive data.
 # SHOW_PASSWORD_HINT=false
 
-## Domain settings
-## The domain must match the address from where you access the server
-## It's recommended to configure this value, otherwise certain functionality might not work,
-## like attachment downloads, email links and U2F.
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://vw.domain.tld:8443
+#########################
+### Advanced settings ###
+#########################
+
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
+## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
+# IP_HEADER=X-Real-IP
+
+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
+## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
+## Default: 2592000 (30 days)
+# ICON_CACHE_TTL=2592000
+## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
+## Default: 2592000 (3 days)
+# ICON_CACHE_NEGTTL=259200
+
+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be to low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Icon blacklist Regex
+## Any domains or IPs that match this regex won't be fetched by the icon service.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+## NOTE: Always enclose this regex withing single quotes!
+# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
+
+## Any IP which is not defined as a global IP will be blacklisted.
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+# ICON_BLACKLIST_NON_GLOBAL_IPS=true
+
+## Client Settings
+## Enable experimental feature flags for clients.
+## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
+##
+## The following flags are available:
+## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
+## - "autofill-v2": Use the new autofill implementation.
+## - "browser-fileless-import": Directly import credentials from other providers without a file.
+## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## Enable extended logging, which shows timestamps and targets in the logs
+# EXTENDED_LOGGING=true
+
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
+
+## Logging to Syslog
+## This requires extended logging
+# USE_SYSLOG=false
+
+## Logging to file
+# LOG_FILE=/path/to/log
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## Setting it to "trace" or "debug" would also show logs for mounted
+## routes and static file, websocket and alive requests
+# LOG_LEVEL=info
+
+## Token for the admin interface, preferably an Argon2 PCH string
+## Vaultwarden has a built-in generator by calling `vaultwarden hash`
+## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
+## If not set, the admin panel is disabled
+## New Argon2 PHC string
+## Note that for some environments, like docker-compose you need to escape all the dollar signs `$` with an extra dollar sign like `$$`
+## Also, use single quotes (') instead of double quotes (") to enclose the string when needed
+# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'
+## Old plain text string (Will generate warnings in favor of Argon2)
+# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
+
+## Enable this to bypass the admin panel security. This option is only
+## meant to be used with the use of a separate auth layer in front
+# DISABLE_ADMIN_TOKEN=false
+
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3
+
+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
 
 ## Allowed iframe ancestors (Know the risks!)
 ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
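Putting the ADMIN_TOKEN notes above together, a minimal sketch of generating and wiring up an Argon2 PHC token (the hash below is the placeholder from the template itself, not a real secret):

    # Generate the PHC string; `vaultwarden hash` is the built-in generator mentioned above:
    vaultwarden hash

    # .env file: single quotes keep the `$` characters literal
    ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'

    # docker-compose.yml: every `$` must be doubled to `$$` so compose does not interpolate it
    # environment:
    #   ADMIN_TOKEN: '$$argon2id$$v=19$$m=65540,t=3,p=4$$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'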
@@ -334,13 +406,16 @@
 ## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
 # LOGIN_RATELIMIT_MAX_BURST=10
 
-## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
-# ADMIN_RATELIMIT_SECONDS=300
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
-# ADMIN_RATELIMIT_MAX_BURST=3
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false
 
-## Set the lifetime of admin sessions to this value (in minutes).
-# ADMIN_SESSION_LIFETIME=20
+########################
+### MFA/2FA settings ###
+########################
 
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
@@ -361,6 +436,25 @@
 ## After that, you should be able to follow the rest of the guide linked above,
 ## ignoring the fields that ask for the values that you already configured beforehand.
 
+## Email 2FA settings
+## Email token size
+## Number of digits in an email 2FA token (min: 6, max: 255).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+##
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+##
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+
+## Other MFA/2FA settings
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+##
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
@@ -373,12 +467,9 @@
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
 # AUTHENTICATOR_DISABLE_TIME_DRIFT=false
 
-## Rocket specific settings
-## See https://rocket.rs/v0.4/guide/configuration/ for more details.
-# ROCKET_ADDRESS=0.0.0.0
-# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
-# ROCKET_WORKERS=10
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+###########################
+### SMTP Email settings ###
+###########################
 
 ## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
@@ -400,7 +491,7 @@
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
-# SMTP_AUTH_MECHANISM="Plain"
+# SMTP_AUTH_MECHANISM=
 
 ## Server name sent during the SMTP HELO
 ## By default this value should be is on the machine's hostname,
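The new default leaves the value unset; per the comma-separation rule in the comments above, a hypothetical value forcing both Plain and Login would look like:

    SMTP_AUTH_MECHANISM=Plain,Login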
@@ -408,30 +499,33 @@
 # HELO_NAME=
 
 ## Embed images as email attachments
-# SMTP_EMBED_IMAGES=false
+# SMTP_EMBED_IMAGES=true
 
 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
 # SMTP_DEBUG=false
 
-## Accept Invalid Hostnames
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-# SMTP_ACCEPT_INVALID_HOSTNAMES=false
-
 ## Accept Invalid Certificates
 ## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
 ## Only use this as a last resort if you are not able to use a valid certificate.
 ## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
 # SMTP_ACCEPT_INVALID_CERTS=false
 
-## Require new device emails. When a user logs in an email is required to be sent.
-## If sending the email fails the login attempt will fail!!
-# REQUIRE_DEVICE_EMAIL=false
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+##########################
+### Rocket settings ###
+##########################
+
+## Rocket specific settings
+## See https://rocket.rs/v0.5/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
+# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
-## HIBP Api Key
-## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
-# HIBP_API_KEY=
-
 # vim: syntax=ini

New file: .github/CODEOWNERS (3 diff lines)

@@ -0,0 +1,3 @@
+/.github @dani-garcia @BlackDex
+/.github/CODEOWNERS @dani-garcia @BlackDex
+/.github/workflows/** @dani-garcia @BlackDex

Changed file: .github/workflows/build.yml (75 diff lines)
@@ -8,9 +8,11 @@ on:
 - "migrations/**"
 - "Cargo.*"
 - "build.rs"
-- "rust-toolchain"
+- "rust-toolchain.toml"
 - "rustfmt.toml"
 - "diesel.toml"
+- "docker/Dockerfile.j2"
+- "docker/DockerSettings.yaml"
 pull_request:
 paths:
 - ".github/workflows/build.yml"
@@ -18,19 +20,20 @@ on:
 - "migrations/**"
 - "Cargo.*"
 - "build.rs"
-- "rust-toolchain"
+- "rust-toolchain.toml"
 - "rustfmt.toml"
 - "diesel.toml"
+- "docker/Dockerfile.j2"
+- "docker/DockerSettings.yaml"
 
 jobs:
 build:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
 timeout-minutes: 120
 # Make warnings errors, this is to prevent warnings slipping through.
 # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
 env:
 RUSTFLAGS: "-D warnings"
-CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git # Use the old git protocol until it is stable probably in 1.68 or 1.69. MSRV needs to be at this before removed.
 strategy:
 fail-fast: false
 matrix:
@@ -43,13 +46,13 @@ jobs:
 steps:
 # Checkout the repo
 - name: "Checkout"
-uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
 # End Checkout the repo
 
 # Install dependencies
 - name: "Install dependencies Ubuntu"
-run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
+run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
 # End Install dependencies
 
@@ -59,7 +62,7 @@
 shell: bash
 run: |
 if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
-RUST_TOOLCHAIN="$(cat rust-toolchain)"
+RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
 elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
 RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
 else
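The grep replaces the plain `cat` because the toolchain file is now TOML. A sketch of what it extracts, assuming a typical rust-toolchain.toml (the version below is illustrative, not taken from this diff):

    # rust-toolchain.toml
    #   [toolchain]
    #   channel = "1.74.0"
    grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml   # prints: 1.74.0
    # \K discards everything matched so far and the lookahead (?=") stops before the
    # closing quote, so only the quoted channel value is printed. The same pattern is
    # reused against Cargo.toml's rust-version key for the MSRV channel.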
@@ -71,7 +74,7 @@
 
 # Only install the clippy and rustfmt components on the default rust-toolchain
 - name: "Install rust-toolchain version"
-uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d # master @ 2023-03-21 - 06:36 GMT+1
+uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
 if: ${{ matrix.channel == 'rust-toolchain' }}
 with:
 toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -81,17 +84,19 @@
 
 # Install the any other channel to be used for which we do not execute clippy and rustfmt
 - name: "Install MSRV version"
-uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d # master @ 2023-03-21 - 06:36 GMT+1
+uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
 if: ${{ matrix.channel != 'rust-toolchain' }}
 with:
 toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
 # End Install the MSRV channel to be used
 
-# Enable Rust Caching
-- uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1
-# End Enable Rust Caching
+# Set the current matrix toolchain version as default
+- name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+run: |
+# Remove the rust-toolchain.toml
+rm rust-toolchain.toml
+# Set the default
+rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
 
 # Show environment
 - name: "Show environment"
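The `rm rust-toolchain.toml` before `rustup default` is what makes the default stick: a rust-toolchain.toml in the working directory takes precedence over `rustup default`, so the file has to go before the matrix-selected toolchain can actually apply.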
@@ -100,47 +105,55 @@ jobs:
 cargo -vV
 # End Show environment
 
+# Enable Rust Caching
+- uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+with:
+# Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
+# Like changing the build host from Ubuntu 20.04 to 22.04 for example.
+# Only update when really needed! Use a <year>.<month>[.<inc>] format.
+prefix-key: "v2023.07-rust"
+# End Enable Rust Caching
+
-# Run cargo tests (In release mode to speed up future builds)
+# Run cargo tests
 # First test all features together, afterwards test them separately.
 - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
 id: test_sqlite_mysql_postgresql_mimalloc
 if: $${{ always() }}
 run: |
-cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
+cargo test --features sqlite,mysql,postgresql,enable_mimalloc
 
 - name: "test features: sqlite,mysql,postgresql"
 id: test_sqlite_mysql_postgresql
 if: $${{ always() }}
 run: |
-cargo test --release --features sqlite,mysql,postgresql
+cargo test --features sqlite,mysql,postgresql
 
 - name: "test features: sqlite"
 id: test_sqlite
 if: $${{ always() }}
 run: |
-cargo test --release --features sqlite
+cargo test --features sqlite
 
 - name: "test features: mysql"
 id: test_mysql
 if: $${{ always() }}
 run: |
-cargo test --release --features mysql
+cargo test --features mysql
 
 - name: "test features: postgresql"
 id: test_postgresql
 if: $${{ always() }}
 run: |
-cargo test --release --features postgresql
+cargo test --features postgresql
 # End Run cargo tests
 
-# Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+# Run cargo clippy, and fail on warnings
 - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
 id: clippy
 if: ${{ always() && matrix.channel == 'rust-toolchain' }}
 run: |
-cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
 # End Run cargo clippy
 
@@ -182,21 +195,3 @@ jobs:
 run: |
 echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
 echo "" >> $GITHUB_STEP_SUMMARY
-
-
-# Build the binary to upload to the artifacts
-- name: "build features: sqlite,mysql,postgresql"
-if: ${{ matrix.channel == 'rust-toolchain' }}
-run: |
-cargo build --release --features sqlite,mysql,postgresql
-# End Build the binary
-
-
-# Upload artifact to Github Actions
-- name: "Upload artifact"
-uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-if: ${{ matrix.channel == 'rust-toolchain' }}
-with:
-name: vaultwarden
-path: target/release/vaultwarden
-# End Upload artifact to Github Actions

Changed file: .github/workflows/hadolint.yml (7 diff lines)
@@ -8,15 +8,14 @@ on: [
 jobs:
 hadolint:
 name: Validate Dockerfile syntax
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
 timeout-minutes: 30
 steps:
 # Checkout the repo
 - name: Checkout
-uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
 # End Checkout the repo
 
 # Download hadolint - https://github.com/hadolint/hadolint/releases
 - name: Download hadolint
 shell: bash
@@ -30,5 +29,5 @@ jobs:
 # Test Dockerfiles
 - name: Run hadolint
 shell: bash
-run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
+run: hadolint docker/Dockerfile.{debian,alpine}
 # End Test Dockerfiles
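The new invocation relies on shell brace expansion, so the single argument expands to the two Dockerfiles before hadolint runs:

    hadolint docker/Dockerfile.{debian,alpine}
    # is equivalent to:
    hadolint docker/Dockerfile.debian docker/Dockerfile.alpine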

Changed file: .github/workflows/release.yml (277 diff lines)
@@ -6,12 +6,11 @@ on:
 - ".github/workflows/release.yml"
 - "src/**"
 - "migrations/**"
-- "hooks/**"
 - "docker/**"
 - "Cargo.*"
 - "build.rs"
 - "diesel.toml"
-- "rust-toolchain"
+- "rust-toolchain.toml"
 
 branches: # Only on paths above
 - main
@@ -24,34 +23,31 @@ jobs:
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
       - name: Skip Duplicates Actions
         id: skip_check
-        uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
+        uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
         with:
           cancel_others: 'true'
         # Only run this when not creating a tag
-        if: ${{ startsWith(github.ref, 'refs/heads/') }}
+        if: ${{ github.ref_type == 'branch' }}

   docker-build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     needs: skip_check
-    # Start a local docker registry to be used to generate multi-arch images.
+    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
+    # Start a local docker registry to extract the final Alpine static build binaries
     services:
       registry:
         image: registry:2
         ports:
           - 5000:5000
     env:
-      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
-      # build performance and the ability to copy extended file attributes
-      # (e.g., for executable capabilities) across build phases.
-      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
       # The *_REPO variables need to be configured as repository variables
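The `services:` block gives the job a throwaway registry at localhost:5000 for the binary-extraction step further down; a rough local equivalent, assuming Docker is available, is:

```sh
# Start a disposable local registry like the workflow's job service does;
# images pushed to localhost:5000 stay on this machine only.
docker run -d --rm --name registry -p 5000:5000 registry:2

# When done:
docker stop registry
```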
@@ -65,7 +61,6 @@ jobs:
       # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
       # Check for Quay.io credentials in secrets
       HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
-    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
         base_image: ["debian","alpine"]
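The `HAVE_*_LOGIN` flags gate each login step on a configured repo variable plus the matching secrets, all-or-nothing. The same check in plain bash (a sketch; the environment variable names here are stand-ins mirroring the workflow's expression, not anything the workflow itself exports):

```sh
# Mirror of the workflow's HAVE_QUAY_LOGIN expression: true only when
# the repo variable and both secrets are non-empty.
if [[ -n "${QUAY_REPO}" && -n "${QUAY_USERNAME}" && -n "${QUAY_TOKEN}" ]]; then
  echo "HAVE_QUAY_LOGIN=true"
else
  echo "HAVE_QUAY_LOGIN=false"
fi
```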
@@ -73,163 +68,201 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
         with:
           fetch-depth: 0

-      # Determine Docker Tag
-      - name: Init Variables
-        id: vars
+      - name: Initialize QEMU binfmt support
+        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
+        with:
+          platforms: "arm64,arm"
+
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+        with:
+          config-inline: |
+            [worker.oci]
+              max-parallelism = 2
+          driver-opts: |
+            network=host
+
+      # Determine Base Tags and Source Version
+      - name: Determine Base Tags and Source Version
         shell: bash
         run: |
-          # Check which main tag we are going to build determined by github.ref
-          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
-            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
-          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
-            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
           fi
-        # End Determine Docker Tag
+
+          # Get the Source Version for this release
+          GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
+          if [[ -n "${GIT_EXACT_TAG}" ]]; then
+            echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}"
+          else
+            GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+            echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}"
+          fi
+        # End Determine Base Tags

       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}

+      - name: Add registry for DockerHub
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}

+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to Quay.io
       - name: Login to Quay.io
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: quay.io
           username: ${{ secrets.QUAY_USERNAME }}
           password: ${{ secrets.QUAY_TOKEN }}
         if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}

-      # Debian
-      # Docker Hub
-      - name: Build Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      # Alpine
-      # Docker Hub
-      - name: Build Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+      - name: Add registry for Quay.io
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
+
+      - name: Configure build cache from/to
+        shell: bash
+        run: |
+          #
+          # Check if there is a GitHub Container Registry Login and use it for caching
+          if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
+            echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},mode=max" | tee -a "${GITHUB_ENV}"
+          else
+            echo "BAKE_CACHE_FROM="
+            echo "BAKE_CACHE_TO="
+          fi
+          #
+
+      - name: Add localhost registry
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
+
+      - name: Bake ${{ matrix.base_image }} containers
+        uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
+        env:
+          BASE_TAGS: "${{ env.BASE_TAGS }}"
+          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
+          SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
+          SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
+          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
+        with:
+          pull: true
+          push: true
+          files: docker/docker-bake.hcl
+          targets: "${{ matrix.base_image }}-multi"
+          set: |
+            *.cache-from=${{ env.BAKE_CACHE_FROM }}
+            *.cache-to=${{ env.BAKE_CACHE_TO }}
+
+      # Extract the Alpine binaries from the containers
+      - name: Extract binaries
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            EXTRACT_TAG="latest"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            EXTRACT_TAG="testing"
+          fi
+
+          # After each extraction the image is removed.
+          # This is needed because using different platforms doesn't trigger a new pull/download
+
+          # Extract amd64 binary
+          docker create --name amd64 --platform=linux/amd64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp amd64:/vaultwarden vaultwarden-amd64
+          docker rm --force amd64
+          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract arm64 binary
+          docker create --name arm64 --platform=linux/arm64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp arm64:/vaultwarden vaultwarden-arm64
+          docker rm --force arm64
+          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv7 binary
+          docker create --name armv7 --platform=linux/arm/v7 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv7:/vaultwarden vaultwarden-armv7
+          docker rm --force armv7
+          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv6 binary
+          docker create --name armv6 --platform=linux/arm/v6 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv6:/vaultwarden vaultwarden-armv6
+          docker rm --force armv6
+          docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+      # Upload artifacts to Github Actions
+      - name: "Upload amd64 artifact"
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
+          path: vaultwarden-amd64
+
+      - name: "Upload arm64 artifact"
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
+          path: vaultwarden-arm64
+
+      - name: "Upload armv7 artifact"
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
+          path: vaultwarden-armv7
+
+      - name: "Upload armv6 artifact"
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
+          path: vaultwarden-armv6
+      # End Upload artifacts to Github Actions
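Reproduced locally, the new pipeline comes down to one bake call plus the create/cp extraction trick. A condensed sketch, assuming buildx is configured and that `docker/docker-bake.hcl` declares the same environment-driven variables the workflow's `env:` block passes in (the localhost tag below is purely illustrative):

```sh
# Build and push the alpine multi-arch targets defined in docker/docker-bake.hcl.
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
BASE_TAGS="testing" \
docker buildx bake --file docker/docker-bake.hcl --push alpine-multi

# Pull one platform and copy the static binary out of the stopped container.
docker create --name tmp --platform=linux/arm64 "localhost:5000/vaultwarden/server:testing-alpine"
docker cp tmp:/vaultwarden vaultwarden-arm64
docker rm --force tmp
```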
.github/workflows/releasecache-cleanup.yml (new file, vendored, 25 lines)
@@ -0,0 +1,25 @@
on:
  workflow_dispatch:
    inputs:
      manual_trigger:
        description: "Manual trigger buildcache cleanup"
        required: false
        default: ""

  schedule:
    - cron: '0 1 * * FRI'

name: Cleanup
jobs:
  releasecache-cleanup:
    name: Releasecache Cleanup
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    steps:
      - name: Delete vaultwarden-buildcache containers
        uses: actions/delete-package-versions@0d39a63126868f5eefaa47169615edd3c0f61e20 # v4.1.1
        with:
          package-name: 'vaultwarden-buildcache'
          package-type: 'container'
          min-versions-to-keep: 0
          delete-only-untagged-versions: 'false'
.github/workflows/trivy.yml (new file, vendored, 42 lines)
@@ -0,0 +1,42 @@
name: trivy

on:
  push:
    branches:
      - main
    tags:
      - '*'
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '00 12 * * *'

permissions:
  contents: read

jobs:
  trivy-scan:
    name: Check
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    permissions:
      contents: read
      security-events: write
      actions: read
    steps:
      - name: Checkout code
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
        with:
          scan-type: repo
          ignore-unfixed: true
          format: sarif
          output: trivy-results.sarif
          severity: CRITICAL,HIGH

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
        with:
          sarif_file: 'trivy-results.sarif'
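The same scan can be run locally before pushing; a minimal sketch, assuming a recent trivy CLI that supports scanning a local checkout with the `repository` subcommand:

```sh
# Scan the working tree the way the workflow does: only CRITICAL/HIGH
# findings with an available fix, emitted as SARIF.
trivy repository --ignore-unfixed --severity CRITICAL,HIGH \
  --format sarif --output trivy-results.sarif .
```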
.hadolint.yaml
@@ -1,9 +1,13 @@
 ignored:
+  # To prevent issues and make clear some images only work on linux/amd64, we ignore this
+  - DL3029
   # disable explicit version for apt install
   - DL3008
   # disable explicit version for apk install
   - DL3018
-  # disable check for consecutive `RUN` instructions
-  - DL3059
+  # Ignore shellcheck info message
+  - SC1091
 trustedRegistries:
   - docker.io
+  - ghcr.io
+  - quay.io
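hadolint reads `.hadolint.yaml` from the current directory automatically, so the ignore list above applies whenever it is run from the repository root; pointing at the config explicitly also works:

```sh
# The ignored rules (DL3008, DL3018, DL3029, SC1091) and the trusted
# registries come from .hadolint.yaml; --config just makes that explicit.
hadolint --config .hadolint.yaml docker/Dockerfile.debian
```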
.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 ---
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.4.0
+  rev: v4.5.0
   hooks:
   - id: check-yaml
   - id: check-json
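pre-commit pins hook versions through the `rev:` field, and bumps like this one are normally produced by the tool itself rather than edited by hand (assuming pre-commit is installed):

```sh
# Rewrite rev: fields to the latest tagged release of each hook repo.
pre-commit autoupdate

# Run every hook against the whole tree, as CI would.
pre-commit run --all-files
```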
Cargo.lock (generated, 2063 changed lines; diff suppressed because it is too large)
Cargo.toml (119 changed lines)
@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.66.1"
+rust-version = "1.73.0"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -36,73 +36,75 @@ unstable = []

 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.0.1" # Needs to be v4 until fern is updated
+syslog = "6.1.0"

 [dependencies]
 # Logging
-log = "0.4.17"
-fern = { version = "0.6.2", features = ["syslog-6"] }
-tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
+log = "0.4.20"
+fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
+tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

 # A `dotenv` implementation for Rust
 dotenvy = { version = "0.15.7", default-features = false }

 # Lazy initialization
-once_cell = "1.17.1"
+once_cell = "1.19.0"

 # Numerical libraries
-num-traits = "0.2.15"
-num-derive = "0.3.3"
+num-traits = "0.2.17"
+num-derive = "0.4.1"
+bigdecimal = "0.4.2"

 # Web framework
-rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
+rocket = { version = "0.5.0", features = ["tls", "json"], default-features = false }
+rocket_ws = { version ="0.1.0" }

 # WebSockets libraries
-tokio-tungstenite = "0.18.0"
-rmpv = "1.0.0" # MessagePack library
+tokio-tungstenite = "0.20.1"
+rmpv = "1.0.1" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.4.0"
+dashmap = "5.5.3"

 # Async futures
-futures = "0.3.27"
-tokio = { version = "1.26.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+futures = "0.3.30"
+tokio = { version = "1.35.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.158", features = ["derive"] }
-serde_json = "1.0.94"
+serde = { version = "1.0.195", features = ["derive"] }
+serde_json = "1.0.111"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.0.3", features = ["chrono", "r2d2"] }
-diesel_migrations = "2.0.0"
-diesel_logger = { version = "0.2.0", optional = true }
+diesel = { version = "2.1.4", features = ["chrono", "r2d2", "numeric"] }
+diesel_migrations = "2.1.0"
+diesel_logger = { version = "0.3.0", optional = true }

 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.25.2", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
-ring = "0.16.20"
+ring = "0.17.7"

 # UUID generation
-uuid = { version = "1.3.0", features = ["v4"] }
+uuid = { version = "1.7.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.24", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.1"
-time = "0.3.20"
+chrono = { version = "0.4.33", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.8.5"
+time = "0.3.31"

 # Job scheduler
 job_scheduler_ng = "2.0.4"

 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.3.3"
+data-encoding = "2.5.0"

 # JWT library
-jsonwebtoken = "8.3.0"
+jsonwebtoken = "9.2.0"

 # TOTP library
-totp-lite = "2.0.0"
+totp-lite = "2.0.1"

 # Yubico Library
 yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
@@ -111,67 +113,80 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 webauthn-rs = "0.3.2"

 # Handling of URL's for WebAuthn and favicons
-url = "2.3.1"
+url = "2.5.0"

 # Email libraries
-lettre = { version = "0.10.3", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
-percent-encoding = "2.2.0" # URL encoding library used for URL's in the emails
+lettre = { version = "0.11.3", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
 email_address = "0.2.4"

 # HTML Template library
-handlebars = { version = "4.3.6", features = ["dir_source"] }
+handlebars = { version = "5.1.1", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.15", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
+reqwest = { version = "0.11.23", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }

 # Favicon extraction libraries
-html5gum = "0.5.2"
-regex = { version = "1.7.3", features = ["std", "perf", "unicode-perl"], default-features = false }
-data-url = "0.2.0"
-bytes = "1.4.0"
+html5gum = "0.5.7"
+regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
+data-url = "0.3.1"
+bytes = "1.5.0"

 # Cache function results (Used for version check and favicon fetching)
-cached = "0.42.0"
+cached = { version = "0.48.1", features = ["async"] }

 # Used for custom short lived cookie jar during favicon extraction
 cookie = "0.16.2"
-cookie_store = "0.19.0"
+cookie_store = "0.19.1"

 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.48"
+openssl = "0.10.63"

 # CLI argument parsing
 pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.12"
-governor = "0.5.1"
+paste = "1.0.14"
+governor = "0.6.0"

 # Check client versions for specific features.
-semver = "1.0.17"
+semver = "1.0.21"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.34", features = ["secure"], default-features = false, optional = true }
-which = "4.4.0"
+mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
+which = "6.0.0"

 # Argon2 library with support for the PHC format
-argon2 = "0.5.0"
+argon2 = "0.5.3"

 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.2.0"
+rpassword = "7.3.1"

 # Strip debuginfo from the release builds
-# Also enable thin LTO for some optimizations
+# The symbols are there to provide better panic traces
+# Also enable fat LTO and use 1 codegen unit for optimizations
 [profile.release]
 strip = "debuginfo"
-lto = "thin"
+lto = "fat"
+codegen-units = 1

+# A little bit of a speedup
+[profile.dev]
+split-debuginfo = "unpacked"

 # Always build argon2 using opt-level 3
 # This is a huge speed improvement during testing
 [profile.dev.package.argon2]
 opt-level = 3

-# A little bit of a speedup
-[profile.dev]
-split-debuginfo = "unpacked"
+# Optimize for size
+[profile.release-micro]
+inherits = "release"
+opt-level = "z"
+strip = "symbols"
+lto = "fat"
+codegen-units = 1
+panic = "abort"
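The new `release-micro` profile is selectable at build time like any custom Cargo profile; a sketch of building with it (any one DB feature works, `sqlite` shown here):

```sh
# Build the size-optimized profile added above; with a custom profile the
# output lands in target/release-micro/ rather than target/release/.
cargo build --profile release-micro --features sqlite
```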
Dockerfile (1 changed line)
@@ -1 +1 @@
-docker/amd64/Dockerfile
+docker/Dockerfile.debian
README.md (11 changed lines)
@@ -38,11 +38,11 @@ Pull the docker image and mount a volume from the host for persistent storage:

 ```sh
 docker pull vaultwarden/server:latest
-docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
+docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
 ```
 This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.

-**IMPORTANT**: Most modern web browsers, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.

 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).

@@ -92,4 +92,11 @@ Thanks for your contribution to the project!
     </a>
   </td>
 </tr>
+<tr>
+  <td align="center">
+    <a href="https://github.com/IQ333777" style="width: 75px">
+      <sub><b>IQ333777</b></sub>
+    </a>
+  </td>
+</tr>
 </table>
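The added `--restart unless-stopped` only affects newly created containers; to confirm which policy a running container actually has (a quick check, assuming the container is named `vaultwarden` as in the README):

```sh
# Inspect the restart policy of the running container; prints
# "unless-stopped" once the new flag is in effect.
docker inspect --format '{{.HostConfig.RestartPolicy.Name}}' vaultwarden
```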
build.rs (11 changed lines)
@@ -17,8 +17,15 @@ fn main() {
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );

+    // Rerun when these paths are changed.
+    // Someone could have checked-out a tag or specific commit, but no other files changed.
+    println!("cargo:rerun-if-changed=.git");
+    println!("cargo:rerun-if-changed=.git/HEAD");
+    println!("cargo:rerun-if-changed=.git/index");
+    println!("cargo:rerun-if-changed=.git/refs/tags");

     #[cfg(all(not(debug_assertions), feature = "query_logger"))]
-    compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
+    compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");

     // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
     // If neither exist, read from git.
@@ -72,7 +79,7 @@ fn version_from_git_info() -> Result<String, std::io::Error> {
     // Combined version
     if let Some(exact) = exact_tag {
         Ok(exact)
-    } else if &branch != "main" && &branch != "master" {
+    } else if &branch != "main" && &branch != "master" && &branch != "HEAD" {
         Ok(format!("{last_tag}-{rev_short} ({branch})"))
     } else {
         Ok(format!("{last_tag}-{rev_short}"))
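The new `rerun-if-changed` lines make Cargo re-run build.rs whenever git HEAD moves, so the embedded version stays current after checking out a tag. The version can also be forced, since build.rs reads `$VW_VERSION` (or the legacy `$BWRS_VERSION`) before falling back to git; the version string below is purely illustrative:

```sh
# Override the version embedded at compile time instead of deriving it
# from git tags (VW_VERSION is read by build.rs).
VW_VERSION="1.30.0-custom" cargo build --features sqlite
```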
docker/DockerSettings.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
---
vault_version: "v2024.1.2"
vault_image_digest: "sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b"
# Cross Compile Docker Helper Scripts v1.3.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
rust_version: 1.75.0 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: 3.19 # Alpine version to be used
# For which platforms/architectures will we try to build images
platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
# Determine the build images per OS/Arch
build_stage_image:
  debian:
    image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}"
    platform: "$BUILDPLATFORM"
  alpine:
    image: "build_${TARGETARCH}${TARGETVARIANT}"
    platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
    arch_image:
      amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
      arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"
      armv7: "ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}"
      armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}"
# The final image which will be used to distribute the container images
runtime_stage_image:
  debian: "docker.io/library/debian:{{debian_version}}-slim"
  alpine: "docker.io/library/alpine:{{alpine_version}}"
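These settings are the single source the Jinja2 template reads when `make` regenerates the Dockerfiles; pulling a value out for scripting is straightforward (a sketch, assuming the Go `yq` tool is installed):

```sh
# Read the pinned Rust version used for all build images.
yq '.rust_version' docker/DockerSettings.yaml

# List the target platforms, one per line.
yq '.platforms[]' docker/DockerSettings.yaml
```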
docker/Dockerfile.alpine (new file, 161 lines)
@@ -0,0 +1,161 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
# This will generate two Dockerfiles: `Dockerfile.debian` and `Dockerfile.alpine`

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2
#     [docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b
#     [docker.io/vaultwarden/web-vault:v2024.1.2]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b as vault

########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.75.0 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.75.0 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.75.0 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.75.0 as build_armv6

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root" \
    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
    # Debian Bookworm already contains libpq v15
    PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
    # Output the current contents of the file
    cat /env-cargo

# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

RUN source /env-cargo && \
    rustup target add "${CARGO_TARGET}"

ARG CARGO_PROFILE=release
ARG VW_VERSION

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN source /env-cargo && \
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
    # Make sure that we actually build the project by updating the src/main.rs timestamp
    # Also do this for build.rs to ensure the version is rechecked
    touch build.rs src/main.rs && \
    # Create a symlink to the binary target folder to easily copy the binary in the final stage
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
    else \
        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
    fi


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
#
# To build these images you need to have qemu binfmt support.
# See the following pages to help install these tools locally
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
#
# Or use a Docker image which modifies your host system to support this.
# The GitHub Actions Workflow uses the same image as used below.
# See: https://github.com/tonistiigi/binfmt
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.19

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data && \
    apk --no-cache add \
        ca-certificates \
        curl \
        openssl \
        tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
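Building one of these images directly, without the bake file, is handy for local testing; a sketch assuming buildx and the qemu binfmt handlers from the comments above are set up (the tag is a hypothetical local name):

```sh
# Build the Alpine-based image for a single foreign platform and load it
# into the local Docker daemon.
docker buildx build -f docker/Dockerfile.alpine \
  --platform linux/arm64 \
  --tag vaultwarden:local-alpine \
  --load .
```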
docker/Dockerfile.buildx (deleted, 34 lines)
@@ -1,34 +0,0 @@
-# syntax=docker/dockerfile:1
-
-# The cross-built images have the build arch (`amd64`) embedded in the image
-# manifest, rather than the target arch. For example:
-#
-#   $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
-#   amd64
-#
-# Recent versions of Docker have started printing a warning when the image's
-# claimed arch doesn't match the host arch. For example:
-#
-#   WARNING: The requested image's platform (linux/amd64) does not match the
-#   detected host platform (linux/arm/v7) and no specific platform was requested
-#
-# The image still works fine, but the spurious warning creates confusion.
-#
-# Docker doesn't seem to provide a way to directly set the arch of an image
-# at build time. To resolve the build vs. target arch discrepancy, we use
-# Docker Buildx to build a new set of images with the correct target arch.
-#
-# Docker Buildx uses this Dockerfile to build an image for each requested
-# platform. Since the Dockerfile basically consists of a single `FROM`
-# instruction, we're effectively telling Buildx to build a platform-specific
-# image by simply copying the existing cross-built image and setting the
-# correct target arch as a side effect.
-#
-# References:
-#
-# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
-# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
-# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
-#
-ARG LOCAL_REPO
-ARG DOCKER_TAG
-FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
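This re-tagging shim becomes obsolete once the bake file produces true multi-platform manifests, which is presumably why it is deleted here. Whether published images carry the right per-platform metadata can be verified directly:

```sh
# Show every platform entry in the published multi-arch manifest list;
# each entry should report its real architecture, not amd64.
docker buildx imagetools inspect vaultwarden/server:latest
```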
docker/Dockerfile.debian (new file, 196 lines)
@@ -0,0 +1,196 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
# This will generate two Dockerfiles: `Dockerfile.debian` and `Dockerfile.alpine`

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2
#     [docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b
#     [docker.io/vaultwarden/web-vault:v2024.1.2]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ac07a71cbcd199e3c9a0639c04234ba2f1ba16cfa2a45b08a7ae27eb82f8e13b as vault

########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
## And these bash scripts do not have any significant difference if at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.75.0-slim-bookworm as build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Install clang to get `xx-cargo` working
# Install pkg-config to allow amd64 builds to find all libraries
# Install git so build.rs can determine the correct version
# Install the libc cross packages based upon the debian-arch
RUN apt-get update && \
    apt-get install -y \
        --no-install-recommends \
        clang \
        pkg-config \
        git \
        "libc6-$(xx-info debian-arch)-cross" \
        "libc6-dev-$(xx-info debian-arch)-cross" \
        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo

RUN xx-apt-get install -y \
        --no-install-recommends \
        gcc \
        libmariadb3 \
        libpq-dev \
        libpq5 \
        libssl-dev \
        zlib1g-dev && \
    # Force install arch-dependent mariadb dev packages
    # Installing them the normal way breaks several other packages (again)
    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
    dpkg --force-all -i ./libmariadb-dev*.deb

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
    if xx-info is-cross ; then \
        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
        # Because of this we generate the needed environment variables here which we can load in the needed steps.
        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
        echo "export CROSS_COMPILE=1" >> /env-cargo && \
        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
    fi && \
    # Output the current contents of the file
    cat /env-cargo

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

RUN source /env-cargo && \
    rustup target add "${CARGO_TARGET}"

ARG CARGO_PROFILE=release
ARG VW_VERSION

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN source /env-cargo && \
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
    # Make sure that we actually build the project by updating the src/main.rs timestamp
    # Also do this for build.rs to ensure the version is rechecked
    touch build.rs src/main.rs && \
    # Create a symlink to the binary target folder to easily copy the binary in the final stage
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
    else \
        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
    fi


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
#
# To build these images you need to have qemu binfmt support.
# See the following pages to help install these tools locally
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
#
# Or use a Docker image which modifies your host system to support this.
# The GitHub Actions Workflow uses the same image as used below.
# See: https://github.com/tonistiigi/binfmt
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    DEBIAN_FRONTEND=noninteractive

# Create data folder and Install needed libraries
RUN mkdir /data && \
    apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
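Cross-building the runtime stages for arm requires the qemu binfmt handlers the comments above point to; a sketch of the one-time host setup followed by a Debian-based cross build (the tag is a hypothetical local name):

```sh
# One-time host setup: register qemu handlers for arm64/arm
# (same image the GitHub Actions workflow uses).
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm

# Cross-build the Debian-based image for armv7.
docker buildx build -f docker/Dockerfile.debian \
  --platform linux/arm/v7 \
  --tag vaultwarden:local-armv7 \
  --load .
```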
@@ -1,66 +1,14 @@
 # syntax=docker/dockerfile:1

 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
+# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`

-{% set build_stage_base_image = "rust:1.68.1-bullseye" %}
-{% if "alpine" in target_file %}
-{% if "amd64" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.68.1" %}
-{% set runtime_stage_base_image = "alpine:3.17" %}
-{% set package_arch_target = "x86_64-unknown-linux-musl" %}
-{% elif "armv7" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.68.1" %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.17" %}
-{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
-{% elif "armv6" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.68.1" %}
-{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.17" %}
-{% set package_arch_target = "arm-unknown-linux-musleabi" %}
-{% elif "arm64" in target_file %}
-{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.68.1" %}
-{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.17" %}
-{% set package_arch_target = "aarch64-unknown-linux-musl" %}
-{% endif %}
-{% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "debian:bullseye-slim" %}
-{% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
-{% set package_arch_name = "arm64" %}
-{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
-{% set package_cross_compiler = "aarch64-linux-gnu" %}
-{% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
-{% set package_arch_name = "armel" %}
-{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
-{% set package_cross_compiler = "arm-linux-gnueabi" %}
-{% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
-{% set package_arch_name = "armhf" %}
-{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
-{% set package_cross_compiler = "arm-linux-gnueabihf" %}
-{% endif %}
-{% if package_arch_name is defined %}
-{% set package_arch_prefix = ":" + package_arch_name %}
-{% else %}
-{% set package_arch_prefix = "" %}
-{% endif %}
-{% if package_arch_target is defined %}
-{% set package_arch_target_param = " --target=" + package_arch_target %}
-{% else %}
-{% set package_arch_target_param = "" %}
-{% endif %}
-{% if "buildkit" in target_file %}
-{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
-{% else %}
-{% set mount_rust_cache = "" %}
-{% endif %}

 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "v2023.3.0b" %}
-{% set vault_image_digest = "sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee" %}
+####################### VAULT BUILD IMAGE #######################
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -70,18 +18,41 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:{{ vault_version }}
-# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:{{ vault_version }}
-# [vaultwarden/web-vault@{{ vault_image_digest }}]
+# $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version }}
+# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version }}
+# [docker.io/vaultwarden/web-vault@{{ vault_image_digest }}]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
-# [vaultwarden/web-vault:{{ vault_version }}]
+# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
+# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault

-########################## BUILD IMAGE ##########################
-FROM {{ build_stage_base_image }} as build
+{% if base == "debian" %}
+########################## Cross Compile Docker Helper Scripts ##########################
+## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
+## And these bash scripts do not have any significant difference if at all
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
+{% elif base == "alpine" %}
+########################## ALPINE BUILD IMAGES ##########################
+## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
+## And for Alpine we define all build images here, they will only be loaded when actually used
+{% for arch in build_stage_image[base].arch_image %}
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
+{% endfor %}
+{% endif %}
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
+{% if base == "debian" %}
+COPY --from=xx / /
+{% endif %}
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -90,148 +61,163 @@ ENV DEBIAN_FRONTEND=noninteractive \
 TERM=xterm-256color \
 CARGO_HOME="/root/.cargo" \
 USER="root"
+{%- if base == "alpine" %} \
+# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
+# Debian Bookworm already contains libpq v15
+PQ_LIB_DIR="/usr/local/musl/pq15/lib"
+{% endif %}
+
+{% if base == "debian" %}
+
+# Install clang to get `xx-cargo` working
+# Install pkg-config to allow amd64 builds to find all libraries
+# Install git so build.rs can determine the correct version
+# Install the libc cross packages based upon the debian-arch
+RUN apt-get update && \
+    apt-get install -y \
+    --no-install-recommends \
+    clang \
+    pkg-config \
+    git \
+    "libc6-$(xx-info debian-arch)-cross" \
+    "libc6-dev-$(xx-info debian-arch)-cross" \
+    "linux-libc-dev-$(xx-info debian-arch)-cross" && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
+
+RUN xx-apt-get install -y \
+    --no-install-recommends \
+    gcc \
+    libmariadb3 \
+    libpq-dev \
+    libpq5 \
+    libssl-dev \
+    zlib1g-dev && \
+    # Force install arch dependend mariadb dev packages
+    # Installing them the normal way breaks several other packages (again)
+    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
+    dpkg --force-all -i ./libmariadb-dev*.deb
+{% endif %}

 # Create CARGO_HOME folder and don't download rust docs
-RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
+RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

-{% if "alpine" in target_file %}
-{% if "armv6" in target_file %}
-# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
-ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
-{% endif %}
-{% elif "arm" in target_file %}
-# Install build dependencies for the {{ package_arch_name }} architecture
-RUN dpkg --add-architecture {{ package_arch_name }} \
-    && apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    gcc-{{ package_cross_compiler }} \
-    libc6-dev{{ package_arch_prefix }} \
-    libcap2-bin \
-    libmariadb-dev{{ package_arch_prefix }} \
-    libmariadb-dev-compat{{ package_arch_prefix }} \
-    libmariadb3{{ package_arch_prefix }} \
-    libpq-dev{{ package_arch_prefix }} \
-    libpq5{{ package_arch_prefix }} \
-    libssl-dev{{ package_arch_prefix }} \
-    #
-    # Make sure cargo has the right target config
-    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
-    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
-    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"
-
-# Set arm specific environment values
-ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
-    CROSS_COMPILE="1" \
-    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
-    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
-{% elif "amd64" in target_file %}
-# Install build dependencies
-RUN apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    libcap2-bin \
-    libmariadb-dev \
-    libpq-dev
-{% endif %}

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-{% if package_arch_target is defined %}
-RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
-{% endif %}
+{% if base == "debian" %}
+# Environment variables for cargo across Debian and Alpine
+RUN source /env-cargo && \
+    if xx-info is-cross ; then \
+    # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
+    # Because of this we generate the needed environment variables here which we can load in the needed steps.
+    echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+    echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+    echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
+    echo "export CROSS_COMPILE=1" >> /env-cargo && \
+    echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
+    echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    fi && \
+    # Output the current contents of the file
+    cat /env-cargo

 # Configure the DB ARG as late as possible to not invalidate the cached layers above
-{% if "alpine" in target_file %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Shared variables across Debian and Alpine
+RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
+    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
+    # Output the current contents of the file
+    cat /env-cargo
+
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-{% else %}
-ARG DB=sqlite,mysql,postgresql
-{% endif %}
 {% endif %}

+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+ARG CARGO_PROFILE=release
+ARG VW_VERSION
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain.toml ./rust-toolchain.toml
+COPY ./build.rs ./build.rs

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
-    && find . -not -path "./target*" -delete
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
-
-{% if "buildkit" in target_file %}
-# Add the `cap_net_bind_service` capability to allow listening on
-# privileged (< 1024) ports even when running as a non-root user.
-# This is only done if building with BuildKit; with the legacy
-# builder, the `COPY` instruction doesn't carry over capabilities.
-{% if package_arch_target is defined %}
-RUN setcap cap_net_bind_service=+ep target/{{ package_arch_target }}/release/vaultwarden
-{% else %}
-RUN setcap cap_net_bind_service=+ep target/release/vaultwarden
-{% endif %}
-{% endif %}
+# Builds again, this time it will be the actual source files being build
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    # Create a symlink to the binary target folder to easy copy the binary in the final stage
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+    ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+    ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM {{ runtime_stage_base_image }}
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}

 ENV ROCKET_PROFILE="release" \
 ROCKET_ADDRESS=0.0.0.0 \
 ROCKET_PORT=80
-{%- if "alpine" in runtime_stage_base_image %} \
+{%- if base == "debian" %} \
+DEBIAN_FRONTEND=noninteractive
+{% elif base == "alpine" %} \
 SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}

-{% if "amd64" not in target_file %}
-RUN [ "cross-build-start" ]
-{% endif %}
-
 # Create data folder and Install needed libraries
-RUN mkdir /data \
-{% if "alpine" in runtime_stage_base_image %}
-&& apk add --no-cache \
+RUN mkdir /data && \
+{% if base == "debian" %}
+    apt-get update && apt-get install -y \
+    --no-install-recommends \
+    ca-certificates \
+    curl \
+    libmariadb-dev-compat \
+    libpq5 \
+    openssl && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+{% elif base == "alpine" %}
+    apk --no-cache add \
     ca-certificates \
     curl \
     openssl \
     tzdata
-{% else %}
-&& apt-get update && apt-get install -y \
-    --no-install-recommends \
-    ca-certificates \
-    curl \
-    libmariadb-dev-compat \
-    libpq5 \
-    openssl \
-&& apt-get clean \
-&& rm -rf /var/lib/apt/lists/*
-{% endif %}
-
-{% if "armv6" in target_file and "alpine" not in target_file %}
-# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
-# This symlink was there in the buster images, and for some reason this is needed.
-RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
-{% endif -%}
-
-{% if "amd64" not in target_file %}
-RUN [ "cross-build-end" ]
 {% endif %}

 VOLUME /data
@@ -241,16 +227,13 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY --from=vault /web-vault ./web-vault
-{% if package_arch_target is defined %}
-COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
-{% else %}
-COPY --from=build /app/target/release/vaultwarden .
-{% endif %}

 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh

+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 CMD ["/start.sh"]
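For orientation, here is what the generated `/env-cargo` file could end up containing for a Debian cross build targeting linux/arm64, derived from the `echo` lines in the template above. This is an illustrative sketch, not captured output; the exact values come from `xx-cargo --print-target-triple` and `xx-info` at build time.

```bash
# Illustrative /env-cargo for a linux/arm64 Debian cross build (values are assumptions)
export CARGO_TARGET=aarch64-unknown-linux-gnu
export CC_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-gcc
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
export PKG_CONFIG=/usr/bin/aarch64-linux-gnu-pkg-config
export CROSS_COMPILE=1
export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu
export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu
```

Each `RUN source /env-cargo && ...` step then picks these up, so cargo links with the correct cross gcc, and the `/app/target/final` symlink lets the runtime stage copy the binary without knowing the target triple or profile.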
@@ -1,15 +1,4 @@
-OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
-
-all: $(OBJECTS)
-
-%/Dockerfile: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.buildkit: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
+all:
+	./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
+	./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine
+
+.PHONY: all
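With the simplified Makefile, regenerating both Dockerfiles after a template or settings change is a single step. A minimal sketch, assuming you run it from the directory that contains `Dockerfile.j2` and the Makefile:

```bash
# Renders Dockerfile.debian and Dockerfile.alpine from Dockerfile.j2
make
```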
docker/README.md
@@ -1,3 +1,183 @@
-The arch-specific directory names follow the arch identifiers used by the Docker official images:
-https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64

# Vaultwarden Container Building

To build and release new testing and stable releases of Vaultwarden we use `docker buildx bake`.<br>
This can be used locally by running the command yourself, but it is also used by GitHub Actions.

This makes it easier for us to test and maintain the different architectures we provide.<br>
We also have just two Dockerfiles: one for Debian and one for Alpine based images.<br>
With just these two files we can build both Debian and Alpine images for the following platforms:
- amd64 (linux/amd64)
- arm64 (linux/arm64)
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)

To build these containers you need to enable QEMU binfmt support so you can run and emulate architectures which are different than your host.<br>
This ensures the container build process can run binaries from other architectures.<br>
**NOTE**: Run all the examples below from the root of the repo.<br>

## How to install QEMU binfmt support

This is different per host OS, but most support it in some way.<br>

### Ubuntu/Debian
```bash
apt install binfmt-support qemu-user-static
```

### Arch Linux (and others based upon it)
```bash
pacman -S qemu-user-static qemu-user-static-binfmt
```

### Fedora
```bash
dnf install qemu-user-static
```

### Others
There is also an option to use another Docker container to provide support for this.
```bash
# To install and activate
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
```
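Whichever route you take, you can verify that the handlers are actually registered before kicking off a build. A quick sketch, assuming a standard Linux host with `binfmt_misc` mounted at the usual location:

```bash
# Each registered QEMU handler shows up as a file; look for qemu-aarch64, qemu-arm, etc.
ls /proc/sys/fs/binfmt_misc/

# "enabled" in the output means the kernel will transparently run that architecture
cat /proc/sys/fs/binfmt_misc/qemu-aarch64
```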
## Single architecture container building

You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>

```bash
# The default bake triggers a Debian build using the host's architecture
docker buildx bake --file docker/docker-bake.hcl

# Bake Debian ARM64 using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl debian-arm64

# Bake Alpine ARMv6 as a release build
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl alpine-armv6
```
## Local Multi Architecture container building

Start with the initialization; this only needs to be done once.

```bash
# Create and use a new buildx builder instance which connects to the host network
docker buildx create --name vaultwarden --use --driver-opt network=host

# Validate that it runs
docker buildx inspect --bootstrap

# Create a local container registry directly reachable on the localhost
docker run -d --name registry --network host registry:2
```
After that is done, you should be able to build and push to the local registry.<br>
Use the following command with the modified variables to bake the Alpine images.<br>
Replace `alpine` with `debian` if you want to build the Debian multi arch images.

```bash
# Start a buildx bake using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
docker buildx bake --file docker/docker-bake.hcl alpine-multi
```
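To check that the multi arch manifest actually landed in the local registry, `docker buildx imagetools` can inspect it. A sketch; the `testing` tag assumes the default `BASE_TAGS` value from the variables table below:

```bash
# Lists the manifest and the per-platform images it references
docker buildx imagetools inspect localhost:5000/vaultwarden/server:testing
```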
## Using the `bake.sh` script

To make triggering a build a bit easier, there is also a `bake.sh` script.<br>
This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables.<br>
This script can be called from both the repo root and the docker directory.

So, if you want to build a Multi Arch Alpine container pushing to your localhost registry, you can run this from within the docker directory. (Just make sure you executed the initialization steps above first.)
```bash
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
./bake.sh alpine-multi
```

Or if you just want to build a Debian container from the repo root, you can run this.
```bash
docker/bake.sh
```
You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
This will also append those values to the tag so you can see the built container when running `docker images`.

You can also append extra arguments after the target if you want. This can be useful, for example, to print what bake will use.
```bash
docker/bake.sh alpine-all --print
```
### Testing baked images

To test these images you can run them by using the correct tag and providing the platform.<br>
For example, after you have built an arm64 image via `./bake.sh debian-arm64` you can run:
```bash
docker run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  vaultwarden/server:testing-arm64
```
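Once the container is up, a quick smoke test from the host confirms the emulated binary actually serves requests. A sketch, assuming the `-p8080:80` mapping above; `/alive` is Vaultwarden's liveness endpoint:

```bash
# Exits non-zero if the server is not responding
curl -fsS http://localhost:8080/alive && echo OK
```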
## Using the `podman-bake.sh` script

To also make building easier using podman, there is a `podman-bake.sh` script.<br>
This script calls `podman buildx build` with the needed parameters and, the same as `bake.sh`, it will generate some variables automatically.<br>
This script can be called from both the repo root and the docker directory.

**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES` and a single `BASE_TAGS` value, no comma separated values. It also only supports building separate architectures, no Multi Arch containers.

To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
```bash
DB="sqlite,enable_mimalloc" \
./podman-bake.sh alpine-arm64
```

Or if you just want to build a Debian container from the repo root, you can run this.
```bash
docker/podman-bake.sh
```

You can append extra arguments after the target if you want. This can be useful, for example, to disable the cache like this.
```bash
./podman-bake.sh alpine-arm64 --no-cache
```
For the podman builds you can, just like with the `bake.sh` script, also append the architecture to build for that specific platform.<br>

### Testing podman built images

The command to start a podman-built container is almost the same as for the docker/bake-built containers. The images start with `localhost/`, so you need to prepend that.

```bash
podman run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  localhost/vaultwarden/server:testing-arm64
```
## Variables supported

| Variable              | default            | description |
| --------------------- | ------------------ | ----------- |
| CARGO_PROFILE         | null               | Which cargo profile to use. `null` means what is defined in the Dockerfile |
| DB                    | null               | Which `features` to build. `null` means what is defined in the Dockerfile |
| SOURCE_REPOSITORY_URL | null               | The source repository from which this build is triggered |
| SOURCE_COMMIT         | null               | The commit hash of the current commit for this build |
| SOURCE_VERSION        | null               | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
| BASE_TAGS             | testing            | Tags to be used. Can be a comma separated value like "latest,1.29.2" |
| CONTAINER_REGISTRIES  | vaultwarden/server | Comma separated value of container registries. Like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
| VW_VERSION            | null               | Overrides the `SOURCE_VERSION` value. This is also used by the `build.rs` code, for example |
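As an illustration of how these variables combine, a release-style invocation might look like the following. This is a sketch: the registries and tags are taken from the defaults and examples in the table above, not from the actual release workflow.

```bash
# Bake Debian multi arch images, tagged and pushed to two registries
BASE_TAGS="latest,1.29.2" \
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker/bake.sh debian-multi
```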
@@ -1,121 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libcap2-bin \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,126 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libcap2-bin \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,119 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/x86_64-unknown-linux-musl/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,142 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libcap2-bin \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
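For reference, the three `echo` lines in the build stage above amount to writing the following cross-compilation stanza into `${CARGO_HOME}/config`; this sketch assumes the file starts out empty:

# Resulting /root/.cargo/config after the echo lines run (assuming no prior contents)
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
rustflags = ["-L/usr/lib/aarch64-linux-gnu"]

Cargo then invokes the Debian cross-gcc as the linker and searches the arm64 multiarch library path when producing the aarch64 binary.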
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
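A usage sketch for these per-architecture files (the `-f` path and image tag are illustrative, not taken from this diff): each variant was meant to be built from the repository root, along the lines of

# Hypothetical invocation; point -f at wherever this Alpine variant lived.
docker build -f docker/arm64/Dockerfile.alpine -t vaultwarden:arm64-alpine .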
@@ -1,147 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libcap2-bin \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/aarch64-unknown-linux-gnu/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
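The `RUN --mount=type=cache,...` instructions in this variant are BuildKit-only syntax: the cargo registry and git caches live outside the image layers and survive across builds. On Docker versions where BuildKit is not yet the default builder it has to be enabled explicitly (image tag illustrative):

# Enable BuildKit for a single build; newer Docker releases use it by default.
DOCKER_BUILDKIT=1 docker build -t vaultwarden:arm64 .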
@@ -1,121 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/aarch64-unknown-linux-musl/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
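The `setcap cap_net_bind_service=+ep` step above grants the binary the right to bind privileged ports, and, as the surrounding comment notes, the capability only survives `COPY` when building with BuildKit. A minimal sketch of the payoff, assuming an image tag of `vaultwarden:arm64-alpine` and an arbitrary UID:

# Still listens on ROCKET_PORT=80 inside the container despite running unprivileged.
docker run --rm --user 1000:1000 -p 8080:80 vaultwarden:arm64-alpine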
@@ -1,146 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libcap2-bin \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,151 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libcap2-bin \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/arm-unknown-linux-gnueabi/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
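The `ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3` step exists because the armel binary hard-codes `/lib/ld-linux.so.3` as its ELF interpreter, a name the Bullseye-based rpi image no longer ships. One way to confirm what a binary requests (path illustrative, needs binutils):

readelf -l target/arm-unknown-linux-gnueabi/release/vaultwarden | grep interpreter
#     [Requesting program interpreter: /lib/ld-linux.so.3]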
@@ -1,123 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/arm-unknown-linux-musleabi/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
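The `RUSTFLAGS='-Clink-arg=...libatomic.a'` line is there because, per the comment, the ARMv6 musl target cannot build with MiMalloc unless the toolchain's `libatomic.a` is linked in explicitly. The same build works as a plain cargo invocation, assuming the identical blackdex/rust-musl toolchain layout for the library path:

RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a' \
    cargo build --release --target=arm-unknown-linux-musleabi --features sqlite,enable_mimalloc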
@@ -1,142 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libcap2-bin \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
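For reference, a single-arch local build of the Dockerfile above might look like this; the file path is an assumption based on the repo's docker/<arch>/ layout used by the hooks below, so adjust it to where this Dockerfile actually lives:

    # Hypothetical invocation; builds the armv7 Alpine image from the repo root
    docker build -t vaultwarden:armv7-alpine -f docker/armv7/Dockerfile.alpine .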
@@ -1,147 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM rust:1.68.1-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libcap2-bin \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/armv7-unknown-linux-gnueabihf/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,121 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2023.3.0b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
#     [vaultwarden/web-vault:v2023.3.0b]
#
FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.68.1 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

# Add the `cap_net_bind_service` capability to allow listening on
# privileged (< 1024) ports even when running as a non-root user.
# This is only done if building with BuildKit; with the legacy
# builder, the `COPY` instruction doesn't carry over capabilities.
RUN setcap cap_net_bind_service=+ep target/armv7-unknown-linux-musleabihf/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
15  docker/bake.sh  Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Be verbose on what is being executed
set -x

# Make sure we set the context to `..` so it will go up one directory
docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
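As a usage sketch, the script simply forwards its arguments to `docker buildx bake`; target names come from the docker-bake.hcl below, and the extra flag is just an example of pass-through arguments:

    # Build the Debian image for the host platform (the "default" group)
    ./docker/bake.sh
    # Build a specific target, passing extra args through to `docker buildx bake`
    ./docker/bake.sh debian-arm64 --print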
33  docker/bake_env.sh  Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# If SOURCE_COMMIT is provided via env skip this
if [ -z "${SOURCE_COMMIT+x}" ]; then
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# If VW_VERSION is provided via env use it as SOURCE_VERSION
# Else define it using git
if [[ -n "${VW_VERSION}" ]]; then
    SOURCE_VERSION="${VW_VERSION}"
else
    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
    if [[ -n "${GIT_EXACT_TAG}" ]]; then
        SOURCE_VERSION="${GIT_EXACT_TAG}"
    else
        GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
        SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
        GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
        case "${GIT_BRANCH}" in
            main|master|HEAD)
                # Do not add the branch name for these branches
                ;;
            *)
                SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
                ;;
        esac
    fi
fi

# Export the rendered variables above so bake will use them
export SOURCE_COMMIT
export SOURCE_VERSION
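A worked example of the version logic, with hypothetical values: on a branch named fix-push, with the last reachable tag 1.29.2 and HEAD at a commit whose hash starts with 4438da39, the script yields:

    # SOURCE_COMMIT  -> the full 40-char commit hash from `git rev-parse HEAD`
    # SOURCE_VERSION -> "1.29.2-4438da39 (fix-push)"
    # On main/master/HEAD the branch suffix is omitted: "1.29.2-4438da39"
    source docker/bake_env.sh && echo "${SOURCE_VERSION}"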
235  docker/docker-bake.hcl  Normal file
@@ -0,0 +1,235 @@
// ==== Baking Variables ====

// Set which cargo profile to use, dev or release for example
// Use the value provided in the Dockerfile as default
variable "CARGO_PROFILE" {
  default = null
}

// Set which DB's (features) to enable
// Use the value provided in the Dockerfile as default
variable "DB" {
  default = null
}

// The repository this build was triggered from
variable "SOURCE_REPOSITORY_URL" {
  default = null
}

// The commit hash of of the current commit this build was triggered on
variable "SOURCE_COMMIT" {
  default = null
}

// The version of this build
// Typically the current exact tag of this commit,
// else the last tag and the first 8 characters of the source commit
variable "SOURCE_VERSION" {
  default = null
}

// This can be used to overwrite SOURCE_VERSION
// It will be used during the build.rs building stage
variable "VW_VERSION" {
  default = null
}

// The base tag(s) to use
// This can be a comma separated value like "testing,1.29.2"
variable "BASE_TAGS" {
  default = "testing"
}

// Which container registries should be used for the tagging
// This can be a comma separated value
// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
variable "CONTAINER_REGISTRIES" {
  default = "vaultwarden/server"
}


// ==== Baking Groups ====

group "default" {
  targets = ["debian"]
}


// ==== Shared Baking ====
function "labels" {
  params = []
  result = {
    "org.opencontainers.image.description" = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
    "org.opencontainers.image.licenses" = "AGPL-3.0-only"
    "org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
    "org.opencontainers.image.url" = "https://github.com/dani-garcia/vaultwarden"
    "org.opencontainers.image.created" = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
    "org.opencontainers.image.source" = "${SOURCE_REPOSITORY_URL}"
    "org.opencontainers.image.revision" = "${SOURCE_COMMIT}"
    "org.opencontainers.image.version" = "${SOURCE_VERSION}"
  }
}

target "_default_attributes" {
  labels = labels()
  args = {
    DB = "${DB}"
    CARGO_PROFILE = "${CARGO_PROFILE}"
    VW_VERSION = "${VW_VERSION}"
  }
}


// ==== Debian Baking ====

// Default Debian target, will build a container using the hosts platform architecture
target "debian" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.debian"
  tags = generate_tags("", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "debian-multi" {
  inherits = ["debian"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "debian-amd64" {
  inherits = ["debian"]
  platforms = ["linux/amd64"]
  tags = generate_tags("", "-amd64")
}

target "debian-arm64" {
  inherits = ["debian"]
  platforms = ["linux/arm64"]
  tags = generate_tags("", "-arm64")
}

target "debian-armv7" {
  inherits = ["debian"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("", "-armv7")
}

target "debian-armv6" {
  inherits = ["debian"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("", "-armv6")
}

// A Group to build all platforms individually for local testing
group "debian-all" {
  targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
}


// ==== Alpine Baking ====

// Default Alpine target, will build a container using the hosts platform architecture
target "alpine" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.alpine"
  tags = generate_tags("-alpine", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "alpine-multi" {
  inherits = ["alpine"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("-alpine", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "alpine-amd64" {
  inherits = ["alpine"]
  platforms = ["linux/amd64"]
  tags = generate_tags("-alpine", "-amd64")
}

target "alpine-arm64" {
  inherits = ["alpine"]
  platforms = ["linux/arm64"]
  tags = generate_tags("-alpine", "-arm64")
}

target "alpine-armv7" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("-alpine", "-armv7")
}

target "alpine-armv6" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("-alpine", "-armv6")
}

// A Group to build all platforms individually for local testing
group "alpine-all" {
  targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
}


// ==== Bake everything locally ====

group "all" {
  targets = ["debian-all", "alpine-all"]
}


// ==== Baking functions ====

// This will return the local platform as amd64, arm64 or armv7 for example
// It can be used for creating a local image tag
function "platform_tag" {
  params = []
  result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}"
}


function "get_container_registries" {
  params = []
  result = flatten(split(",", CONTAINER_REGISTRIES))
}

function "get_base_tags" {
  params = []
  result = flatten(split(",", BASE_TAGS))
}

function "generate_tags" {
  params = [
    suffix,   // What to append to the BASE_TAG when needed, like `-alpine` for example
    platform  // the platform we are building for if needed
  ]
  result = flatten([
    for registry in get_container_registries() :
      [for base_tag in get_base_tags() :
        concat(
          # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
          base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
          # The default tagging strategy
          ["${registry}:${base_tag}${suffix}${platform}"]
        )
      ]
  ])
}

function "image_index_annotations" {
  params = []
  result = flatten([
    for key, value in labels() :
    value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : []
  ])
}
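To make the tagging concrete, here is a worked example (env values hypothetical) of what generate_tags() produces for the alpine-amd64 target:

    # With these inputs...
    CONTAINER_REGISTRIES="vaultwarden/server" BASE_TAGS="latest,1.29.2" ./docker/bake.sh alpine-amd64 --print
    # ...generate_tags("-alpine", "-amd64") expands to:
    #   vaultwarden/server:alpine-amd64          # extra tag: base_tag == "latest" and suffix == "-alpine"
    #   vaultwarden/server:latest-alpine-amd64   # default strategy for "latest"
    #   vaultwarden/server:1.29.2-alpine-amd64   # default strategy for "1.29.2"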
@@ -1,16 +1,24 @@
-#!/bin/sh
+#!/usr/bin/env sh

 # Use the value of the corresponding env var (if present),
 # or a default value otherwise.
-: "${DATA_FOLDER:="data"}"
+: "${DATA_FOLDER:="/data"}"
 : "${ROCKET_PORT:="80"}"
+: "${ENV_FILE:="/.env"}"

 CONFIG_FILE="${DATA_FOLDER}"/config.json

+# Check if the $ENV_FILE file exist and is readable
+# If that is the case, load it into the environment before running any check
+if [ -r "${ENV_FILE}" ]; then
+    # shellcheck disable=SC1090
+    . "${ENV_FILE}"
+fi
+
 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {
-    local key="$1"
+    key="$1"
     # Extract a line of the form:
     #     "domain": "https://bw.example.com/path",
     grep "\"${key}\":" "${CONFIG_FILE}" |
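A quick sketch of why the new ENV_FILE block matters (file contents hypothetical): because the env file is sourced after the defaults are set, values in it win over the fallbacks and are presumably used further down in the script when building the probe URL.

    # If /.env contains, e.g.:
    #   ROCKET_PORT=8080
    # then the healthcheck sources it and probes port 8080
    # instead of falling back to the default of 80.
    ENV_FILE=/.env /healthcheck.sh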
105  docker/podman-bake.sh  Executable file
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Check if a target is given as first argument
# If not we assume the defaults and pass the given arguments to the podman command
case "${1}" in
    alpine*|debian*)
        TARGET="${1}"
        # Now shift the $@ array so we only have the rest of the arguments
        # This allows us too append these as extra arguments too the podman buildx build command
        shift
        ;;
esac

LABEL_ARGS=(
    --label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
    --label org.opencontainers.image.licenses="AGPL-3.0-only"
    --label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    --label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    --label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
)
if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
fi
if [[ -n "${SOURCE_COMMIT}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
fi
if [[ -n "${SOURCE_VERSION}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
fi

# Check if and which --build-arg arguments we need to configure
BUILD_ARGS=()
if [[ -n "${DB}" ]]; then
    BUILD_ARGS+=(--build-arg DB="${DB}")
fi
if [[ -n "${CARGO_PROFILE}" ]]; then
    BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
fi
if [[ -n "${VW_VERSION}" ]]; then
    BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
fi

# Set the default BASE_TAGS if non are provided
if [[ -z "${BASE_TAGS}" ]]; then
    BASE_TAGS="testing"
fi

# Set the default CONTAINER_REGISTRIES if non are provided
if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
    CONTAINER_REGISTRIES="vaultwarden/server"
fi

# Check which Dockerfile we need to use, default is debian
case "${TARGET}" in
    alpine*)
        BASE_TAGS="${BASE_TAGS}-alpine"
        DOCKERFILE="Dockerfile.alpine"
        ;;
    *)
        DOCKERFILE="Dockerfile.debian"
        ;;
esac

# Check which platform we need to build and append the BASE_TAGS with the architecture
case "${TARGET}" in
    *-arm64)
        BASE_TAGS="${BASE_TAGS}-arm64"
        PLATFORM="linux/arm64"
        ;;
    *-armv7)
        BASE_TAGS="${BASE_TAGS}-armv7"
        PLATFORM="linux/arm/v7"
        ;;
    *-armv6)
        BASE_TAGS="${BASE_TAGS}-armv6"
        PLATFORM="linux/arm/v6"
        ;;
    *)
        BASE_TAGS="${BASE_TAGS}-amd64"
        PLATFORM="linux/amd64"
        ;;
esac

# Be verbose on what is being executed
set -x

# Build the image with podman
# We use the docker format here since we are using `SHELL`, which is not supported by OCI
# shellcheck disable=SC2086
podman buildx build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
    --format=docker \
    "${LABEL_ARGS[@]}" \
    "${BUILD_ARGS[@]}" \
    --file="${BASEDIR}/${DOCKERFILE}" "$@" \
    "${BASEDIR}/.."
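Usage sketch (values hypothetical; the target names mirror the bake targets above):

    # Build the Alpine armv7 image with podman, enabling only the sqlite feature
    DB=sqlite ./docker/podman-bake.sh alpine-armv7
    # Resulting local tag, following the script's suffix logic:
    #   vaultwarden/server:testing-alpine-armv7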
@@ -1,17 +1,31 @@
 #!/usr/bin/env python3

-import os, argparse, json
+import os
+import argparse
+import json
+import yaml
 import jinja2

+# Load settings file
+with open("DockerSettings.yaml", 'r') as yaml_file:
+    yaml_data = yaml.safe_load(yaml_file)
+
+settings_env = jinja2.Environment(
+    loader=jinja2.FileSystemLoader(os.getcwd()),
+)
+settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data))
+
 args_parser = argparse.ArgumentParser()
 args_parser.add_argument('template_file', help='Jinja2 template file to render.')
 args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
 cli_args = args_parser.parse_args()

+# Merge the default config yaml with the json arguments given.
 render_vars = json.loads(cli_args.render_vars)
+settings_yaml.update(render_vars)

 environment = jinja2.Environment(
     loader=jinja2.FileSystemLoader(os.getcwd()),
     trim_blocks=True,
 )
-print(environment.get_template(cli_args.template_file).render(render_vars))
+print(environment.get_template(cli_args.template_file).render(settings_yaml))
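A hypothetical invocation of the updated script; the paths and JSON keys are illustrative, since the real keys come from DockerSettings.yaml and the Jinja2 templates:

    # Render a Dockerfile from the template, overriding one setting via the JSON argument
    ./docker/render_template docker/Dockerfile.j2 '{"base": "debian"}' > docker/Dockerfile.debian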
@@ -1,20 +0,0 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
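Since these hooks are being removed in this change, the following is only a sketch of how they were exercised locally, with the Docker Hub variables supplied by hand (all values hypothetical):

    # hooks/build falls back to `git rev-parse HEAD` when SOURCE_COMMIT is unset
    DOCKER_REPO=vaultwarden/server DOCKER_TAG=testing ./hooks/build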
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix
51  hooks/build
@@ -1,51 +0,0 @@
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildkit_suffix=.buildkit
fi

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
111  hooks/push
@@ -1,111 +0,0 @@
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }

set -ex

echo ">>> Starting local Docker registry when needed..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
# First check if there already is a registry container running, else skip it.
# This will only happen either locally or running it via Github Actions
#
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
    # defaults to port 5000
    docker run -d --name registry --network host registry:2
fi

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
# Check if there already is a builder running, else skip this and use the existing.
# This will only happen either locally or running it via Github Actions
#
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
    docker buildx create --name builder --use --driver-opt network=host
fi

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platform="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx
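After a push like this, the resulting multi-arch manifest list can be verified with a standard Buildx command (tag hypothetical):

    # Lists the per-architecture manifests behind the multi-arch tag
    docker buildx imagetools inspect vaultwarden/server:testing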
@@ -0,0 +1 @@
ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,10 @@
CREATE TABLE organization_api_key (
    uuid          CHAR(36) NOT NULL,
    org_uuid      CHAR(36) NOT NULL REFERENCES organizations(uuid),
    atype         INTEGER NOT NULL,
    api_key       VARCHAR(255) NOT NULL,
    revision_date DATETIME NOT NULL,
    PRIMARY KEY(uuid, org_uuid)
);

ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                 CHAR(36) NOT NULL,
    organization_uuid         CHAR(36),
    request_device_identifier CHAR(36) NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        CHAR(36),
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1 @@
ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
MODIFY master_password_hash TEXT;

ALTER TABLE auth_requests
MODIFY enc_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
2  migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql  Normal file
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN `key` TEXT;
@@ -0,0 +1 @@
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;
@@ -0,0 +1 @@
ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,10 @@
CREATE TABLE organization_api_key (
    uuid          CHAR(36) NOT NULL,
    org_uuid      CHAR(36) NOT NULL REFERENCES organizations(uuid),
    atype         INTEGER NOT NULL,
    api_key       VARCHAR(255),
    revision_date TIMESTAMP NOT NULL,
    PRIMARY KEY(uuid, org_uuid)
);

ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                 CHAR(36) NOT NULL,
    organization_uuid         CHAR(36),
    request_device_identifier CHAR(36) NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        CHAR(36),
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             TIMESTAMP NOT NULL,
    response_date             TIMESTAMP,
    authentication_date       TIMESTAMP,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1 @@
ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
ALTER COLUMN master_password_hash DROP NOT NULL;

ALTER TABLE auth_requests
ALTER COLUMN enc_key DROP NOT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ALTER COLUMN file_size TYPE BIGINT,
ALTER COLUMN file_size SET NOT NULL;
@@ -0,0 +1 @@
ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,11 @@
CREATE TABLE organization_api_key (
    uuid          TEXT NOT NULL,
    org_uuid      TEXT NOT NULL,
    atype         INTEGER NOT NULL,
    api_key       TEXT NOT NULL,
    revision_date DATETIME NOT NULL,
    PRIMARY KEY(uuid, org_uuid),
    FOREIGN KEY(org_uuid) REFERENCES organizations(uuid)
);

ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                      TEXT NOT NULL PRIMARY KEY,
    user_uuid                 TEXT NOT NULL,
    organization_uuid         TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        TEXT,
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT NOT NULL,
    master_password_hash      TEXT NOT NULL,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1 @@
ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -0,0 +1,29 @@
-- Create new auth_requests table with master_password_hash as nullable column
CREATE TABLE auth_requests_new (
    uuid                      TEXT NOT NULL PRIMARY KEY,
    user_uuid                 TEXT NOT NULL,
    organization_uuid         TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type               INTEGER NOT NULL,
    request_ip                TEXT NOT NULL,
    response_device_id        TEXT,
    access_code               TEXT NOT NULL,
    public_key                TEXT NOT NULL,
    enc_key                   TEXT,
    master_password_hash      TEXT,
    approved                  BOOLEAN,
    creation_date             DATETIME NOT NULL,
    response_date             DATETIME,
    authentication_date       DATETIME,
    FOREIGN KEY (user_uuid) REFERENCES users (uuid),
    FOREIGN KEY (organization_uuid) REFERENCES organizations (uuid)
);

-- Transfer current data to new table
INSERT INTO auth_requests_new SELECT * FROM auth_requests;

-- Drop the old table
DROP TABLE auth_requests;

-- Rename the new table to the original name
ALTER TABLE auth_requests_new RENAME TO auth_requests;
@@ -0,0 +1,2 @@
-- Add the external_id to the users_organizations table
ALTER TABLE "users_organizations" ADD COLUMN "external_id" TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1 @@
-- Integer size in SQLite is already i64, so we don't need to do anything
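The SQLite hunk above rebuilds auth_requests instead of altering it because SQLite's ALTER TABLE cannot drop a NOT NULL constraint in place, unlike the MySQL MODIFY and PostgreSQL DROP NOT NULL variants. A quick hypothetical sanity check after migrating (database path assumed from the default data folder):

    # The column list should show enc_key and master_password_hash with notnull = 0
    sqlite3 data/db.sqlite3 "PRAGMA table_info(auth_requests);"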
@@ -1 +0,0 @@
1.68.1
4  rust-toolchain.toml  Normal file
@@ -0,0 +1,4 @@
[toolchain]
channel = "1.75.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
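With the plain rust-toolchain file replaced by rust-toolchain.toml, rustup picks up the pinned toolchain automatically; a quick check from the repo root:

    # Should report 1.75.0 as the active toolchain, with rustfmt and clippy available
    rustup show active-toolchain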
128
src/api/admin.rs
128
src/api/admin.rs
@@ -13,7 +13,10 @@ use rocket::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{core::log_event, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
|
api::{
|
||||||
|
core::{log_event, two_factor},
|
||||||
|
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
|
||||||
|
},
|
||||||
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
||||||
config::ConfigBuilder,
|
config::ConfigBuilder,
|
||||||
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
||||||
@@ -21,6 +24,7 @@ use crate::{
|
|||||||
mail,
|
mail,
|
||||||
util::{
|
util::{
|
||||||
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
|
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
|
||||||
|
NumberOrString,
|
||||||
},
|
},
|
||||||
CONFIG, VERSION,
|
CONFIG, VERSION,
|
||||||
};
|
};
|
||||||
@@ -36,6 +40,7 @@ pub fn routes() -> Vec<Route> {
|
|||||||
get_user_by_mail_json,
|
get_user_by_mail_json,
|
||||||
post_admin_login,
|
post_admin_login,
|
||||||
admin_page,
|
admin_page,
|
||||||
|
admin_page_login,
|
||||||
invite_user,
|
invite_user,
|
||||||
logout,
|
logout,
|
||||||
delete_user,
|
delete_user,
|
||||||
@@ -53,7 +58,8 @@ pub fn routes() -> Vec<Route> {
|
|||||||
organizations_overview,
|
organizations_overview,
|
||||||
delete_organization,
|
delete_organization,
|
||||||
diagnostics,
|
diagnostics,
|
||||||
get_diagnostics_config
|
get_diagnostics_config,
|
||||||
|
resend_user_invite,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -182,12 +188,11 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
|
|||||||
let claims = generate_admin_claims();
|
let claims = generate_admin_claims();
|
||||||
let jwt = encode_jwt(&claims);
|
let jwt = encode_jwt(&claims);
|
||||||
|
|
||||||
let cookie = Cookie::build(COOKIE_NAME, jwt)
|
let cookie = Cookie::build((COOKIE_NAME, jwt))
|
||||||
.path(admin_path())
|
.path(admin_path())
|
||||||
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
||||||
.same_site(SameSite::Strict)
|
.same_site(SameSite::Strict)
|
||||||
.http_only(true)
|
.http_only(true);
|
||||||
.finish();
|
|
||||||
|
|
||||||
cookies.add(cookie);
|
cookies.add(cookie);
|
||||||
if let Some(redirect) = redirect {
|
if let Some(redirect) = redirect {
|
||||||
@@ -255,6 +260,11 @@ fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
|
|||||||
render_admin_page()
|
render_admin_page()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[get("/", rank = 2)]
|
||||||
|
fn admin_page_login() -> ApiResult<Html<String>> {
|
||||||
|
render_admin_login(None, None)
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Deserialize, Debug)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct InviteData {
|
struct InviteData {
|
||||||
@@ -272,12 +282,11 @@ async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
|
|||||||
#[post("/invite", data = "<data>")]
|
#[post("/invite", data = "<data>")]
|
||||||
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||||
let data: InviteData = data.into_inner();
|
let data: InviteData = data.into_inner();
|
||||||
let email = data.email.clone();
|
|
||||||
if User::find_by_mail(&data.email, &mut conn).await.is_some() {
|
if User::find_by_mail(&data.email, &mut conn).await.is_some() {
|
||||||
err_code!("User already exists", Status::Conflict.code)
|
err_code!("User already exists", Status::Conflict.code)
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut user = User::new(email);
|
let mut user = User::new(data.email);
|
||||||
|
|
||||||
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
@@ -307,7 +316,7 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
|
|||||||
|
|
||||||
#[get("/logout")]
|
#[get("/logout")]
|
||||||
fn logout(cookies: &CookieJar<'_>) -> Redirect {
|
fn logout(cookies: &CookieJar<'_>) -> Redirect {
|
||||||
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
|
cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
|
||||||
Redirect::to(admin_path())
|
Redirect::to(admin_path())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -319,6 +328,10 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
|
|||||||
let mut usr = u.to_json(&mut conn).await;
|
let mut usr = u.to_json(&mut conn).await;
|
||||||
usr["UserEnabled"] = json!(u.enabled);
|
usr["UserEnabled"] = json!(u.enabled);
|
||||||
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||||
|
usr["LastActive"] = match u.last_active(&mut conn).await {
|
||||||
|
Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
|
||||||
|
None => json!(None::<String>),
|
||||||
|
};
|
||||||
users_json.push(usr);
|
users_json.push(usr);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -333,7 +346,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
|
|||||||
let mut usr = u.to_json(&mut conn).await;
|
let mut usr = u.to_json(&mut conn).await;
|
||||||
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
|
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
|
||||||
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
|
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
|
||||||
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
|
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
|
||||||
usr["user_enabled"] = json!(u.enabled);
|
usr["user_enabled"] = json!(u.enabled);
|
||||||
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||||
usr["last_active"] = match u.last_active(&mut conn).await {
|
usr["last_active"] = match u.last_active(&mut conn).await {
|
||||||
@@ -348,8 +361,8 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
 }
 
 #[get("/users/by-mail/<mail>")]
-async fn get_user_by_mail_json(mail: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
-    if let Some(u) = User::find_by_mail(&mail, &mut conn).await {
+async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
+    if let Some(u) = User::find_by_mail(mail, &mut conn).await {
         let mut usr = u.to_json(&mut conn).await;
         usr["UserEnabled"] = json!(u.enabled);
         usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
@@ -360,8 +373,8 @@ async fn get_user_by_mail_json(mail: String, _token: AdminToken, mut conn: DbCon
 }
 
 #[get("/users/<uuid>")]
-async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
-    let u = get_user_or_404(&uuid, &mut conn).await?;
+async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
+    let u = get_user_or_404(uuid, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
     usr["UserEnabled"] = json!(u.enabled);
     usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
@@ -369,19 +382,19 @@ async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> Js
 }
 
 #[post("/users/<uuid>/delete")]
-async fn delete_user(uuid: String, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let user = get_user_or_404(&uuid, &mut conn).await?;
+async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let user = get_user_or_404(uuid, &mut conn).await?;
 
     // Get the user_org records before deleting the actual user
-    let user_orgs = UserOrganization::find_any_state_by_user(&uuid, &mut conn).await;
+    let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
     let res = user.delete(&mut conn).await;
 
     for user_org in user_orgs {
         log_event(
             EventType::OrganizationUserRemoved as i32,
             &user_org.uuid,
-            user_org.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            &user_org.org_uuid,
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,
@@ -393,21 +406,29 @@ async fn delete_user(uuid: String, token: AdminToken, mut conn: DbConn) -> Empty
 }
 
 #[post("/users/<uuid>/deauth")]
-async fn deauth_user(uuid: String, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
-    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
-    user.reset_security_stamp();
-
-    let save_result = user.save(&mut conn).await;
+async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
 
     nt.send_logout(&user, None).await;
 
-    save_result
+    if CONFIG.push_enabled() {
+        for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
+            match unregister_push_device(device.push_uuid).await {
+                Ok(r) => r,
+                Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
+            };
+        }
+    }
+
+    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
+    user.reset_security_stamp();
+
+    user.save(&mut conn).await
 }
 
 #[post("/users/<uuid>/disable")]
-async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();
     user.enabled = false;
@@ -420,21 +441,40 @@ async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn, nt: No
 }
 
 #[post("/users/<uuid>/enable")]
-async fn enable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
     user.enabled = true;
 
     user.save(&mut conn).await
 }
 
 #[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }
 
+#[post("/users/<uuid>/invite/resend")]
+async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
+        //TODO: replace this with user.status check when it will be available (PR#3397)
+        if !user.password_hash.is_empty() {
+            err_code!("User already accepted invitation", Status::BadRequest.code);
+        }
+
+        if CONFIG.mail_enabled() {
+            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
+        } else {
+            Ok(())
+        }
+    } else {
+        err_code!("User doesn't exist", Status::NotFound.code);
+    }
+}
+
 #[derive(Deserialize, Debug)]
 struct UserOrgTypeData {
     user_type: NumberOrString,
@@ -481,8 +521,8 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
     log_event(
         EventType::OrganizationUserUpdated as i32,
         &user_to_edit.uuid,
-        data.org_uuid,
-        String::from(ACTING_ADMIN_USER),
+        &data.org_uuid,
+        ACTING_ADMIN_USER,
         14, // Use UnknownBrowser type
         &token.ip.ip,
         &mut conn,
@@ -510,7 +550,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
         org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
-        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
+        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
         organizations_json.push(org);
     }
 
@@ -519,8 +559,8 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
 }
 
 #[post("/organizations/<uuid>/delete")]
-async fn delete_organization(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let org = Organization::find_by_uuid(&uuid, &mut conn).await.map_res("Organization doesn't exist")?;
+async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
     org.delete(&mut conn).await
 }
 
@@ -742,14 +782,24 @@ impl<'r> FromRequest<'r> for AdminToken {
 
         let access_token = match cookies.get(COOKIE_NAME) {
             Some(cookie) => cookie.value(),
-            None => return Outcome::Failure((Status::Unauthorized, "Unauthorized")),
+            None => {
+                let requested_page =
+                    request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
+                // When the requested page is empty, it is `/admin`, in that case, Forward, so it will render the login page
+                // Else, return a 401 failure, which will be caught
+                if requested_page.is_empty() {
+                    return Outcome::Forward(Status::Unauthorized);
+                } else {
+                    return Outcome::Error((Status::Unauthorized, "Unauthorized"));
+                }
+            }
         };
 
         if decode_admin(access_token).is_err() {
             // Remove admin cookie
-            cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+            cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
             error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
-            return Outcome::Failure((Status::Unauthorized, "Session expired"));
+            return Outcome::Error((Status::Unauthorized, "Session expired"));
         }
 
         Outcome::Success(Self {
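Two mechanical changes run through every request guard in this comparison: Rocket 0.5 renamed the guard variant `Outcome::Failure` to `Outcome::Error`, and `Outcome::Forward` now carries the `Status` to forward with. A minimal guard showing both variants, assuming Rocket 0.5; the header name and path are invented for the example. The `@@ -1,15 +1,19 @@` hunk that follows starts at line 1, i.e. the import block of a second changed file in this comparison.

    use rocket::http::Status;
    use rocket::request::{FromRequest, Outcome, Request};

    struct ApiKey(String);

    #[rocket::async_trait]
    impl<'r> FromRequest<'r> for ApiKey {
        type Error = &'static str;

        async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
            match req.headers().get_one("X-Example-Key") {
                Some(key) => Outcome::Success(ApiKey(key.to_string())),
                // Rocket 0.5: `Error` replaces the old `Failure` variant, and
                // `Forward` takes the status to forward with.
                None if req.uri().path().as_str() == "/login" => Outcome::Forward(Status::Unauthorized),
                None => Outcome::Error((Status::Unauthorized, "missing key")),
            }
        }
    }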
@@ -1,15 +1,19 @@
+use crate::db::DbPool;
 use chrono::Utc;
 use rocket::serde::json::Json;
 use serde_json::Value;
 
 use crate::{
     api::{
-        core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
+        core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
+        JsonUpcase, Notify, PasswordOrOtpData, UpdateType,
     },
-    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
+    auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
     crypto,
     db::{models::*, DbConn},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };
 
 use rocket::{
@@ -35,6 +39,7 @@ pub fn routes() -> Vec<rocket::Route> {
         post_verify_email_token,
         post_delete_recover,
         post_delete_recover_token,
+        post_device_token,
         delete_account,
         post_delete_account,
         revision_date,
@@ -46,6 +51,14 @@ pub fn routes() -> Vec<rocket::Route> {
         get_known_device,
         get_known_device_from_path,
         put_avatar,
+        put_device_token,
+        put_clear_device_token,
+        post_clear_device_token,
+        post_auth_request,
+        get_auth_request,
+        put_auth_request,
+        get_auth_request_response,
+        get_auth_requests,
     ]
 }
 
@@ -133,7 +146,7 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
             err!("Registration email does not match invite email")
         }
     } else if Invitation::take(&email, &mut conn).await {
-        for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
+        for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
             user_org.status = UserOrgStatus::Accepted as i32;
             user_org.save(&mut conn).await?;
         }
@@ -169,8 +182,8 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
         user.client_kdf_iter = client_kdf_iter;
     }
 
-    user.client_kdf_parallelism = data.KdfMemory;
-    user.client_kdf_memory = data.KdfParallelism;
+    user.client_kdf_memory = data.KdfMemory;
+    user.client_kdf_parallelism = data.KdfParallelism;
 
     user.set_password(&data.MasterPasswordHash, Some(data.Key), true, None);
     user.password_hint = password_hint;
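The two-line fix above corrects a transposition: the old code stored the client's Argon2 memory parameter in `client_kdf_parallelism` and vice versa. One way to make that class of bug harder to write is to move values by name in a single destructuring, as in this hypothetical sketch (all names invented for illustration):

    struct KdfInput {
        memory: Option<i32>,
        parallelism: Option<i32>,
    }

    struct UserKdf {
        client_kdf_memory: Option<i32>,
        client_kdf_parallelism: Option<i32>,
    }

    // Each field name appears exactly once on each side, so swapping two
    // assignment lines cannot silently cross the values.
    fn apply_kdf(user: &mut UserKdf, input: KdfInput) {
        let KdfInput { memory, parallelism } = input;
        user.client_kdf_memory = memory;
        user.client_kdf_parallelism = parallelism;
    }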
@@ -266,10 +279,11 @@ async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: Db
 }
 
 #[get("/users/<uuid>/public-key")]
-async fn get_public_keys(uuid: String, _headers: Headers, mut conn: DbConn) -> JsonResult {
-    let user = match User::find_by_uuid(&uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
+async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
+    let user = match User::find_by_uuid(uuid, &mut conn).await {
+        Some(user) if user.public_key.is_some() => user,
+        Some(_) => err_code!("User has no public_key", Status::NotFound.code),
+        None => err_code!("User doesn't exist", Status::NotFound.code),
     };
 
     Ok(Json(json!({
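The rewritten `get_public_keys` uses a match guard so a user without a stored public key now yields a 404 instead of a success with a null key. The guard semantics in isolation, as a self-contained sketch:

    // A `Some` that fails its guard falls through to the next arm rather
    // than matching, which is what lets the endpoint distinguish "no such
    // user" from "user exists but has no public key".
    fn classify(user: Option<Option<String>>) -> &'static str {
        match user {
            Some(public_key) if public_key.is_some() => "ok",
            Some(_) => "user has no public_key",
            None => "user doesn't exist",
        }
    }

    fn main() {
        assert_eq!(classify(Some(Some("key".into()))), "ok");
        assert_eq!(classify(Some(None)), "user has no public_key");
        assert_eq!(classify(None), "user doesn't exist");
    }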
@@ -335,7 +349,7 @@ async fn post_password(
 
     let save_result = user.save(&mut conn).await;
 
-    // Prevent loging out the client where the user requested this endpoint from.
+    // Prevent logging out the client where the user requested this endpoint from.
     // If you do logout the user it will causes issues at the client side.
     // Adding the device uuid will prevent this.
     nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -389,6 +403,9 @@ async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: D
         } else {
             err!("Argon2 parallelism parameter is required.")
         }
+    } else {
+        user.client_kdf_memory = None;
+        user.client_kdf_parallelism = None;
     }
     user.client_kdf_iter = data.KdfIterations;
     user.client_kdf_type = data.Kdf;
@@ -479,7 +496,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
 
     let save_result = user.save(&mut conn).await;
 
-    // Prevent loging out the client where the user requested this endpoint from.
+    // Prevent logging out the client where the user requested this endpoint from.
     // If you do logout the user it will causes issues at the client side.
     // Adding the device uuid will prevent this.
     nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -489,17 +506,15 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
 
 #[post("/accounts/security-stamp", data = "<data>")]
 async fn post_sstamp(
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();
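`PasswordOrOtpData` replaces `PasswordData` across several endpoints in this comparison; its definition is not part of the hunks shown here. Under the assumption that it carries an optional master-password hash and an optional TOTP code, the validate-one-of-two pattern looks roughly like this (a sketch, not the crate's actual type):

    struct PasswordOrOtp {
        master_password_hash: Option<String>,
        otp: Option<String>,
    }

    impl PasswordOrOtp {
        // Accept either credential; reject if neither verifies. The two
        // checker closures stand in for password hashing and TOTP checks.
        fn validate(
            &self,
            check_password: impl Fn(&str) -> bool,
            check_otp: impl Fn(&str) -> bool,
        ) -> Result<(), &'static str> {
            match (&self.master_password_hash, &self.otp) {
                (Some(hash), _) if check_password(hash) => Ok(()),
                (_, Some(code)) if check_otp(code) => Ok(()),
                _ => Err("Invalid password or OTP"),
            }
        }
    }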
@@ -519,6 +534,10 @@ struct EmailTokenData {
 
 #[post("/accounts/email-token", data = "<data>")]
 async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: EmailTokenData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -565,6 +584,10 @@ async fn post_email(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: ChangeEmailData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -714,18 +737,16 @@ async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut
 }
 
 #[post("/accounts/delete", data = "<data>")]
-async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> EmptyResult {
     delete_account(data, headers, conn).await
 }
 
 #[delete("/accounts", data = "<data>")]
-async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
+async fn delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     user.delete(&mut conn).await
 }
@@ -803,16 +824,13 @@ pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json
         None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None),
     };
 
-    let mut result = json!({
+    let result = json!({
         "Kdf": kdf_type,
         "KdfIterations": kdf_iter,
+        "KdfMemory": kdf_mem,
+        "KdfParallelism": kdf_para,
     });
 
-    if kdf_type == UserKdfType::Argon2id as i32 {
-        result["KdfMemory"] = Value::Number(kdf_mem.expect("Argon2 memory parameter is required.").into());
-        result["KdfParallelism"] = Value::Number(kdf_para.expect("Argon2 parallelism parameter is required.").into());
-    }
-
     Json(result)
 }
 
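The `_prelogin` simplification leans on the fact that `json!` serializes `None` as JSON `null`, so `KdfMemory` and `KdfParallelism` can be emitted unconditionally and PBKDF2 users simply receive nulls instead of the old conditional insertion (which would `expect`-panic on missing Argon2 parameters). Verifiable in isolation:

    use serde_json::json;

    fn main() {
        let kdf_mem: Option<i32> = None;     // a PBKDF2 user has no memory parameter
        let kdf_para: Option<i32> = Some(4); // an Argon2id user would have Some(..)
        let v = json!({ "KdfMemory": kdf_mem, "KdfParallelism": kdf_para });
        assert_eq!(v.to_string(), r#"{"KdfMemory":null,"KdfParallelism":4}"#);
    }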
@@ -835,20 +853,13 @@ fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers
     Ok(())
 }
 
-async fn _api_key(
-    data: JsonUpcase<SecretVerificationRequest>,
-    rotate: bool,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
+async fn _api_key(data: JsonUpcase<PasswordOrOtpData>, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult {
     use crate::util::format_date;
 
-    let data: SecretVerificationRequest = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     if rotate || user.api_key.is_none() {
         user.api_key = Some(crypto::generate_api_key());
@@ -863,29 +874,29 @@ async fn _api_key(
 }
 
 #[post("/accounts/api-key", data = "<data>")]
-async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
     _api_key(data, false, headers, conn).await
 }
 
 #[post("/accounts/rotate-api-key", data = "<data>")]
-async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn rotate_api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
     _api_key(data, true, headers, conn).await
 }
 
 // This variant is deprecated: https://github.com/bitwarden/server/pull/2682
 #[get("/devices/knowndevice/<email>/<uuid>")]
-async fn get_known_device_from_path(email: String, uuid: String, mut conn: DbConn) -> JsonResult {
+async fn get_known_device_from_path(email: &str, uuid: &str, mut conn: DbConn) -> JsonResult {
     // This endpoint doesn't have auth header
     let mut result = false;
-    if let Some(user) = User::find_by_mail(&email, &mut conn).await {
-        result = Device::find_by_uuid_and_user(&uuid, &user.uuid, &mut conn).await.is_some();
+    if let Some(user) = User::find_by_mail(email, &mut conn).await {
+        result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn).await.is_some();
     }
     Ok(Json(json!(result)))
 }
 
 #[get("/devices/knowndevice")]
 async fn get_known_device(device: KnownDevice, conn: DbConn) -> JsonResult {
-    get_known_device_from_path(device.email, device.uuid, conn).await
+    get_known_device_from_path(&device.email, &device.uuid, conn).await
 }
 
 struct KnownDevice {
@@ -899,29 +910,26 @@ impl<'r> FromRequest<'r> for KnownDevice {
 
     async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
-            let email_bytes = match data_encoding::BASE64URL.decode(email_b64.as_bytes()) {
+            let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
                 Ok(bytes) => bytes,
                 Err(_) => {
-                    return Outcome::Failure((
-                        Status::BadRequest,
-                        "X-Request-Email value failed to decode as base64url",
-                    ));
+                    return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
                 }
             };
             match String::from_utf8(email_bytes) {
                 Ok(email) => email,
                 Err(_) => {
-                    return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
+                    return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
                }
            }
         } else {
-            return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
+            return Outcome::Error((Status::BadRequest, "X-Request-Email value is required"));
         };
 
         let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
             uuid.to_string()
         } else {
-            return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
+            return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
         };
 
         Outcome::Success(KnownDevice {
@@ -930,3 +938,279 @@ impl<'r> FromRequest<'r> for KnownDevice {
         })
     }
 }
 
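The switch from `BASE64URL` to `BASE64URL_NOPAD` matters because `data_encoding`'s padded alphabet rejects input whose length is not a multiple of four, and clients send the email without `=` padding. A small check, assuming the data-encoding crate:

    use data_encoding::{BASE64URL, BASE64URL_NOPAD};

    fn main() {
        // "user@example.com" encoded without the trailing "==" padding.
        let b64 = "dXNlckBleGFtcGxlLmNvbQ";
        assert!(BASE64URL.decode(b64.as_bytes()).is_err());
        assert_eq!(
            BASE64URL_NOPAD.decode(b64.as_bytes()).unwrap(),
            b"user@example.com"
        );
    }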
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct PushToken {
+    PushToken: String,
+}
+
+#[post("/devices/identifier/<uuid>/token", data = "<data>")]
+async fn post_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
+    put_device_token(uuid, data, headers, conn).await
+}
+
+#[put("/devices/identifier/<uuid>/token", data = "<data>")]
+async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data = data.into_inner().data;
+    let token = data.PushToken;
+
+    let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
+        Some(device) => device,
+        None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
+    };
+
+    // if the device already has been registered
+    if device.is_registered() {
+        // check if the new token is the same as the registered token
+        if device.push_token.is_some() && device.push_token.unwrap() == token.clone() {
+            debug!("Device {} is already registered and token is the same", uuid);
+            return Ok(());
+        } else {
+            // Try to unregister already registered device
+            let _ = unregister_push_device(device.push_uuid).await;
+        }
+        // clear the push_uuid
+        device.push_uuid = None;
+    }
+    device.push_token = Some(token);
+    if let Err(e) = device.save(&mut conn).await {
+        err!(format!("An error occurred while trying to save the device push token: {e}"));
+    }
+
+    register_push_device(&mut device, &mut conn).await?;
+
+    Ok(())
+}
+
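One nit in the added `put_device_token`: `device.push_token.is_some() && device.push_token.unwrap() == token.clone()` moves the stored token out of the struct just to compare it. An equivalent comparison that borrows instead, offered as a hypothetical refactor of that line:

    fn token_unchanged(push_token: &Option<String>, token: &str) -> bool {
        // as_deref() turns &Option<String> into Option<&str>, so the
        // comparison needs no unwrap() and no clone().
        push_token.as_deref() == Some(token)
    }

    fn main() {
        assert!(token_unchanged(&Some("abc".to_string()), "abc"));
        assert!(!token_unchanged(&None, "abc"));
    }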
+#[put("/devices/identifier/<uuid>/clear-token")]
+async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
+    // This only clears push token
+    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
+    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
+    // This is somehow not implemented in any app, added it in case it is required
+    if !CONFIG.push_enabled() {
+        return Ok(());
+    }
+
+    if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
+        Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
+        unregister_push_device(device.push_uuid).await?;
+    }
+
+    Ok(())
+}
+
+// On upstream server, both PUT and POST are declared. Implementing the POST method in case it would be useful somewhere
+#[post("/devices/identifier/<uuid>/clear-token")]
+async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
+    put_clear_device_token(uuid, conn).await
+}
+
+#[derive(Debug, Deserialize)]
+#[allow(non_snake_case)]
+struct AuthRequestRequest {
+    accessCode: String,
+    deviceIdentifier: String,
+    email: String,
+    publicKey: String,
+    #[serde(alias = "type")]
+    _type: i32,
+}
+
+#[post("/auth-requests", data = "<data>")]
+async fn post_auth_request(
+    data: Json<AuthRequestRequest>,
+    headers: ClientHeaders,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    let data = data.into_inner();
+
+    let user = match User::find_by_mail(&data.email, &mut conn).await {
+        Some(user) => user,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    let mut auth_request = AuthRequest::new(
+        user.uuid.clone(),
+        data.deviceIdentifier.clone(),
+        headers.device_type,
+        headers.ip.ip.to_string(),
+        data.accessCode,
+        data.publicKey,
+    );
+    auth_request.save(&mut conn).await?;
+
+    nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.deviceIdentifier, &mut conn).await;
+
+    Ok(Json(json!({
+        "id": auth_request.uuid,
+        "publicKey": auth_request.public_key,
+        "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+        "requestIpAddress": auth_request.request_ip,
+        "key": null,
+        "masterPasswordHash": null,
+        "creationDate": auth_request.creation_date.and_utc(),
+        "responseDate": null,
+        "requestApproved": false,
+        "origin": CONFIG.domain_origin(),
+        "object": "auth-request"
+    })))
+}
+
+#[get("/auth-requests/<uuid>")]
+async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
+    let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[derive(Debug, Deserialize)]
+#[allow(non_snake_case)]
+struct AuthResponseRequest {
+    deviceIdentifier: String,
+    key: String,
+    masterPasswordHash: Option<String>,
+    requestApproved: bool,
+}
+
+#[put("/auth-requests/<uuid>", data = "<data>")]
+async fn put_auth_request(
+    uuid: &str,
+    data: Json<AuthResponseRequest>,
+    mut conn: DbConn,
+    ant: AnonymousNotify<'_>,
+    nt: Notify<'_>,
+) -> JsonResult {
+    let data = data.into_inner();
+    let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    auth_request.approved = Some(data.requestApproved);
+    auth_request.enc_key = Some(data.key);
+    auth_request.master_password_hash = data.masterPasswordHash;
+    auth_request.response_device_id = Some(data.deviceIdentifier.clone());
+    auth_request.save(&mut conn).await?;
+
+    if auth_request.approved.unwrap_or(false) {
+        ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
+        nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.deviceIdentifier, &mut conn).await;
+    }
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[get("/auth-requests/<uuid>/response?<code>")]
+async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) -> JsonResult {
+    let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    if !auth_request.check_access_code(code) {
+        err!("Access code invalid doesn't exist")
+    }
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
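Taken together, the auth-request endpoints added here implement login-with-device: the new device POSTs a request carrying an access code and a public key, an already-authenticated device approves it via the PUT route with the key material encrypted for that public key, and the new device then fetches the result from the `response` route using its access code. A rough sketch of the retrieving side, assuming the reqwest crate (with its `json` feature) and tokio; the base URL, `/api` mount point, and poll interval are illustrative, not part of this PR:

    use std::time::Duration;

    // Poll the response endpoint until the approving device has answered,
    // i.e. until "responseDate" flips from null to a timestamp.
    async fn wait_for_auth_response(
        base: &str,
        request_id: &str,
        access_code: &str,
    ) -> reqwest::Result<serde_json::Value> {
        loop {
            let url = format!("{base}/api/auth-requests/{request_id}/response?code={access_code}");
            let resp: serde_json::Value = reqwest::get(&url).await?.json().await?;
            if !resp["responseDate"].is_null() {
                return Ok(resp);
            }
            tokio::time::sleep(Duration::from_secs(2)).await;
        }
    }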
+#[get("/auth-requests")]
+async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
+    let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &mut conn).await;
+
+    Ok(Json(json!({
+        "data": auth_requests
+            .iter()
+            .filter(|request| request.approved.is_none())
+            .map(|request| {
+                let response_date_utc = request.response_date.map(|response_date| response_date.and_utc());
+
+                json!({
+                    "id": request.uuid,
+                    "publicKey": request.public_key,
+                    "requestDeviceType": DeviceType::from_i32(request.device_type).to_string(),
+                    "requestIpAddress": request.request_ip,
+                    "key": request.enc_key,
+                    "masterPasswordHash": request.master_password_hash,
+                    "creationDate": request.creation_date.and_utc(),
+                    "responseDate": response_date_utc,
+                    "requestApproved": request.approved,
+                    "origin": CONFIG.domain_origin(),
+                    "object":"auth-request"
+                })
+            }).collect::<Vec<Value>>(),
+        "continuationToken": null,
+        "object": "list"
+    })))
+}
+
+pub async fn purge_auth_requests(pool: DbPool) {
+    debug!("Purging auth requests");
+    if let Ok(mut conn) = pool.get().await {
+        AuthRequest::purge_expired_auth_requests(&mut conn).await;
+    } else {
+        error!("Failed to get DB connection while purging trashed ciphers")
+    }
+}
Some files were not shown because too many files have changed in this diff.