Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 18:55:57 +03:00)
Compare commits
339 Commits
fd27759a95
01d8056c73
81fa33ebb5
e8aa3bc066
0bf0125e82
6209e778e5
5323283f98
57e17d0648
da55d5ec70
828a060698
3e5971b9db
47c2625d38
49af9cf4f5
6b1daeba05
9f1240d8d9
a8138be69b
ea57dc3bc9
131348a49f
b22564cb00
16eb0a56f9
3e4ff47a38
8ea01a67f6
aa5cc642e1
a121cb6f00
60164182ae
f842a80cdb
4b6a574ee0
f9ebb780f9
1fc6c30652
46a1a013cd
551810c486
b987ba506d
84810f2bb2
424d666a50
a71359f647
d93c344176
b9c3213b90
95e24ffc51
00d56d7295
7436b454db
8da5b99482
2969e87b52
ce62e898c3
431462d839
7d0e234b34
dad1b1bee9
9312cebee3
cdf5b6ec2d
ce99fc8f95
a75d050001
75cfd10f11
9859ba6339
513056f711
ebe334fcc7
0eec12472e
39106d440a
9117095764
099bba950c
e37ff60617
5b14608041
ad92692bab
d956d42903
d69be7d03a
f82de8d00d
c836f88ff2
8b660ae090
9323c57f49
85e3c73525
a74bc2e58f
0680638933
46d31ee5f7
e794b397d3
d41350050b
4cd5b06b7f
cd768439d2
9e5fd2d576
ecb46f591c
d62d53aa8e
2c515ab13c
83d556ff0c
678d313836
705d840ea3
7dff8c01dd
5860679624
4628e4519d
b884fd20a1
67c657003d
580c1bbc7d
2b6383d243
f27455a26f
1d4f900e48
c5ca588a6f
06888251e3
1a6e4cf4e4
9f86196a9d
1e31043fb3
85adcf1ae5
9abb4d2873
235ff44736
9c2d741749
37cc0c34cf
5633b6ac94
175f2aeace
feefe69094
46df3ee7cd
bb945ad01b
de86aa671e
e38771bbbd
a3f9a8d7dc
4b6bc6ef66
455a23361f
1a8ec04733
4e60df7a08
219a9d9f5e
48baf723a4
6530904883
d15d24f4ff
8d992d637e
6ebc83c3b7
b32f4451ee
99142c7552
db710bb931
a9e9a397d8
d46a6ac687
1eb5495802
7cf8809d77
043aa27aa3
9824d94a1c
e8ef76b8f9
be1ddb4203
caddf21fca
5379329ef7
6faaeaae66
3fed323385
58a928547d
558410c5bd
0dc0decaa7
d11d663c5c
771233176f
ed70b07d81
e25fc7083d
fa364c3f2c
b5f9fe4d3b
013d4c28b2
63acc8619b
ec920b5756
95caaf2a40
7099f8bee8
b41a0d840c
c577ade90e
257b143df1
34ee326ce9
090104ce1b
3305d5dc92
296063e135
b9daa59e5d
5bdcfe128d
1842a796fb
ce99e5c583
0c96c2d305
5796b6b554
c7ab27c86f
8c03746a67
8746d36845
448e6ac917
729c9cff41
22b9c80007
ab4355cfed
948dc82228
bc74fd23e7
37776241be
feba41ec88
6a8f42da8a
670d8cb83a
2f7fbde789
c698bca2b9
0b6a003a8b
c64560016e
978be0b4a9
b58bff1178
2f3e18caa9
6a291040bd
dbc082dc75
32a0dd09bf
f847c6e225
99da5fbebb
6a0d024c69
b24929a243
9a47821642
d69968313b
3c377d97dc
ea15218197
0eee907c88
c877583979
844cf70345
a0d92a167c
d7b0d6f9f5
4c3b328aca
260ffee093
c59cfe3371
0822c0c128
57a88f0a1b
87393409f9
062f5e4712
aaba1e8368
ff2684dfee
6b5fa201aa
7167e443ca
175d647e47
4c324e1160
0365b7c6a4
19889187a5
9571277c44
a202da9e23
e5a77a477d
c05dc50f53
3bbdbb832c
d9684bef6b
db0c45c172
ad4393e3f7
f83a8a36d1
0e9eba8c8b
d5c760960a
2c6ef2bc68
7032ae5587
eba22c2d94
11cc9ae0c0
fb648db47d
959283d333
385c2227e7
6d9f03e84b
6a972e4b19
171b174ce9
93b7ded1e6
29c6b145ca
a7a479623c
83dff9ae6e
6b2cc5a3ee
5247e0d773
05b308b8b4
9621278fca
570d6c8bf9
ad48e9ed0f
f724addf9a
aa20974703
a846f6c610
c218c34812
2626e66873
81e0e1b339
fd1354d00e
071a3b2a32
32cfaab5ee
d348f12a0e
11845d9f5b
de70fbf88a
0b04caab78
4c78c5a9c9
73f0841f17
4559e85daa
bbef332e25
1e950c7dbc
f14e19a3d8
668d5c23dc
fb6f96f5c3
6e6e34ff18
790146bfac
af625930d6
a28ebcb401
77e47ddd1f
5b620ba6cd
d5f9b33f66
596c9b8691
d4357eb55a
b37f0dfde3
624791e09a
f9a73a9bbe
35868dd72c
979d010dc2
b34d548246
a87646b8cb
a2411eef56
52ed8e4d75
24c914799d
db53511855
325691e588
fac3cb687d
afbf1db331
1aefaec297
f1d3fb5d40
ac2723f898
2fffaec226
5c54dfee3a
967d2d78ec
1aa5e0d4dc
b47cf97409
5e802f8aa3
0bdeb02a31
b03698fadb
39d1a09704
a447e4e7ef
4eee6e7aee
b6fde857a7
3c66deb5cc
4146612a32
a314933557
c5d7e3f2bc
c95a2881b5
4c3727b4a3
a1f304dff7
a8870eef0d
afaebc6cf3
8f4a1f4fc2
0807783388
80d4061d14
dc2f8e5c85
aee1ea032b
484e82fb9f
322a08edfb
08afc312c3
5571a5d8ed
6a8c65493f
dfdf4473ea
8bbbff7567
42e37ebea1
632f4d5453
6c5e35ce5c
4ff15f6dc2
ec8028aef2
63cbd9ef9c
9cca64003a
819d5e2dc8
3b06ab296b
0de52c6c99
e3b00b59a7
5a390a973f
1ee8e44912
86685c1cd2
e4d08836e2
c2a324e5da
@@ -3,6 +3,7 @@ target
# Data folder
data
.env

# IDE files
.vscode
@@ -10,5 +11,15 @@ data
*.iml

# Documentation
.github
*.md
*.txt
*.yml
*.yaml

# Docker folders
hooks
tools

# Web vault
web-vault
109 .env.template
@@ -1,19 +1,34 @@
## Bitwarden_RS Configuration File
## Uncomment any of the following lines to change the defaults
##
## Be aware that most of these settings will be overridden if they were changed
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .

## Main data folder
# DATA_FOLDER=data

## Database URL
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
## When using MySQL, this it is the URL to the DB, including username and password:
## Format: mysql://[user[:password]@]host/database_name
# DATABASE_URL=data/db.sqlite3
## When using MySQL, specify an appropriate connection URI.
## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
# DATABASE_URL=mysql://user:password@host[:port]/database_name
## When using PostgreSQL, specify an appropriate connection URI (recommended)
## or keyword/value connection string.
## Details:
## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
# DATABASE_URL=postgresql://user:password@host[:port]/database_name

## Database max connections
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10

## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends

## Templates data folder, by default uses embedded templates
## Check source code to see the format
@@ -44,6 +59,10 @@
## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true

## Timestamp format used in extended logging.
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"

## Logging to file
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log
@@ -68,6 +87,10 @@
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true

## Database connection retries
## Number of times to retry the database connection during startup, with a 1-second delay between retries; set to 0 to retry indefinitely.
# DB_CONNECTION_RETRIES=15

## Disable icon downloading
## Set to true to disable icon downloading. This will still serve icons from $ICON_CACHE_FOLDER,
## but it won't produce any external network requests. You also need to set $ICON_CACHE_TTL to 0,
@@ -82,10 +105,11 @@
## Icon blacklist Regex
## Any domains or IPs that match this regex won't be fetched by the icon service.
## Useful to hide other servers in the local network. Check the WIKI for more details
# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
## NOTE: Always enclose this regex within single quotes!
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'

## Any IP which is not defined as a global IP will be blacklisted.
## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# ICON_BLACKLIST_NON_GLOBAL_IPS=true

## Disable 2FA remember
@@ -93,6 +117,18 @@
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false

## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3

## Token expiration time
## Maximum time in seconds a token is valid. The time the user has to open the email client and copy the token.
# EMAIL_EXPIRATION_TIME=600

## Email token size
## Number of digits in an email token (min: 6, max: 19).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6

## Controls if new users can register
# SIGNUPS_ALLOWED=true

@@ -114,6 +150,14 @@
## even if SIGNUPS_ALLOWED is set to false
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org

## Controls which users can create new orgs.
## Blank or 'all' means all users can create orgs (this is the default):
# ORG_CREATION_USERS=
## 'none' means no users can create orgs:
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com

## Token for the admin interface, preferably use a long random string
## One option is to use 'openssl rand -base64 48'
## If not set, the admin panel is disabled
@@ -125,6 +169,16 @@

## Allows org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Bitwarden_RS

## Per-organization attachment limit (KB)
## Limit in kilobytes for an organization's attachments; once the limit is exceeded it won't be possible to upload more.
# ORG_ATTACHMENT_LIMIT=
## Per-user attachment limit (KB).
## Limit in kilobytes for a user's attachments; once the limit is exceeded it won't be possible to upload more.
# USER_ATTACHMENT_LIMIT=


## Controls the PBKDF2 password iterations to apply on the server
## The change only applies when the password is changed
@@ -140,6 +194,13 @@
## For U2F to work, the server must use HTTPS; you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443

## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
## This adds the configured value to the 'Content-Security-Policy' header's 'frame-ancestors' value.
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=

## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP
## You can generate it here: https://upgrade.yubico.com/getapikey/
@@ -183,11 +244,45 @@
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_FROM_NAME=Bitwarden_RS
# SMTP_PORT=587
# SMTP_SSL=true
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS; it will upgrade an insecure connection to a secure one, unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 is the default.
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL needs to be set to true for this option to work. Usually port 465 is used here.
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_AUTH_MECHANISM="Plain"
# SMTP_TIMEOUT=15

## Defaults for SSL are "Plain" and "Login", and nothing for non-SSL connections.
## Possible values: ["Plain", "Login", "Xoauth2"].
## Multiple options need to be separated by a comma ','.
# SMTP_AUTH_MECHANISM="Plain"

## Server name sent during the SMTP HELO
## By default this value is based on the machine's hostname,
## but might need to be changed in case it trips some anti-spam filters
# HELO_NAME=

## SMTP debugging
## When set to true this will output very detailed SMTP messages.
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
# SMTP_DEBUG=false

## Accept Invalid Hostnames
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
# SMTP_ACCEPT_INVALID_HOSTNAMES=false

## Accept Invalid Certificates
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
## If the certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
# SMTP_ACCEPT_INVALID_CERTS=false

## Require new device emails. When a user logs in, an email is required to be sent.
## If sending the email fails, the login attempt will fail!
# REQUIRE_DEVICE_EMAIL=false

## HIBP Api Key
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
# HIBP_API_KEY=

# vim: syntax=ini
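A worked example of the Implicit TLS combination described in the SMTP comments above (a sketch based only on those comments; the host and credentials are placeholders, not values from this template):

```ini
## Hypothetical Implicit TLS (port 465) setup, assuming a provider that
## only offers SMTPS; host, username, and password are placeholders.
SMTP_HOST=smtp.example.com
SMTP_PORT=465
SMTP_SSL=true            # must remain true for SMTP_EXPLICIT_TLS to take effect (see bug #851)
SMTP_EXPLICIT_TLS=true   # despite the name, this selects Implicit TLS
SMTP_USERNAME=username
SMTP_PASSWORD=password
```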
3 .gitattributes (vendored, new file)
@@ -0,0 +1,3 @@
# Ignore vendored scripts in GitHub stats
src/static/* linguist-vendored
56 .github/ISSUE_TEMPLATE/bug_report.md (vendored)
@@ -1,42 +1,66 @@
---
name: Bug report
about: Create a report to help us improve
about: Use this ONLY for bugs in bitwarden_rs itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
title: ''
labels: ''
assignees: ''

---
<!--
# ###
NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
This saves you and us a lot of time and troubleshooting.
See:
* https://github.com/dani-garcia/bitwarden_rs/issues/1180
* https://github.com/dani-garcia/bitwarden_rs/wiki/Updating-the-bitwarden-image
# ###
-->

<!--
Please fill out the following template to make solving your problem easier and faster for us.
This is only a guideline. If you think that parts are unneccessary for your issue, feel free to remove them.
This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.

Remember to hide/obfuscate personal and confidential information,
such as names, global IP/DNS adresses and especially passwords, if neccessary.
Remember to hide/redact personal or confidential information,
such as passwords, IP addresses, and DNS names as appropriate.
-->

### Subject of the issue
<!-- Describe your issue here.-->
<!-- Describe your issue here. -->

### Your environment
<!-- The version number, obtained from the logs or the admin page -->
* Bitwarden_rs version:
<!-- How the server was installed: Docker image / package / built from source -->
### Deployment environment

<!--
=========================================================================================
Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
That will auto-generate most of the info requested in this section.
=========================================================================================
-->

<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
<!-- This is NOT the version number shown on the web vault, which is versioned separately from bitwarden_rs -->
<!-- Remember to check if your issue exists on the latest version first! -->
* bitwarden_rs version:

<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
* Install method:
* Clients used: <!-- if applicable -->

* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->

* Reverse proxy and version: <!-- if applicable -->
* Version of mysql/postgresql: <!-- if applicable -->
* Other relevant information:

* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->

* Other relevant details:

### Steps to reproduce
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
and how did you start bitwarden_rs? -->

### Expected behaviour
<!-- Tell us what should happen -->
<!-- Tell us what you expected to happen -->

### Actual behaviour
<!-- Tell us what happens instead -->
<!-- Tell us what actually happened -->

### Relevant logs
<!-- Share some logfiles, screenshots or output of relevant programs with us. -->
### Troubleshooting data
<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
8 .github/ISSUE_TEMPLATE/config.yml (vendored, new file)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Discourse forum for bitwarden_rs
    url: https://bitwardenrs.discourse.group/
    about: Use this forum to request features or get help with usage/configuration.
  - name: GitHub Discussions for bitwarden_rs
    url: https://github.com/dani-garcia/bitwarden_rs/discussions
    about: An alternative to the Discourse forum, if this is easier for you.
11 .github/ISSUE_TEMPLATE/feature_request.md (vendored)
@@ -1,11 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your feature requests to the forum
Link: https://bitwardenrs.discourse.group/c/feature-requests
@@ -1,11 +0,0 @@
---
name: Help with installation/configuration
about: Any questions about the setup of bitwarden_rs
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/help
@@ -1,11 +0,0 @@
---
name: Help with proxy/database/NAS setup
about: Any questions about third party software
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/third-party-help
144 .github/workflows/build.yml (vendored, new file)
@@ -0,0 +1,144 @@
name: Build

on:
  push:
    # Ignore when there are only changes done to one of these paths
    paths-ignore:
      - "**.md"
      - "**.txt"
      - "azure-pipelines.yml"
      - "docker/**"
      - "hooks/**"
      - "tools/**"
  pull_request:
    # Ignore when there are only changes done to one of these paths
    paths-ignore:
      - "**.md"
      - "**.txt"
      - "azure-pipelines.yml"
      - "docker/**"
      - "hooks/**"
      - "tools/**"

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        channel:
          - nightly
          # - stable
        target-triple:
          - x86_64-unknown-linux-gnu
          # - x86_64-unknown-linux-musl
        include:
          - target-triple: x86_64-unknown-linux-gnu
            host-triple: x86_64-unknown-linux-gnu
            features: "sqlite,mysql,postgresql"
            channel: nightly
            os: ubuntu-18.04
            ext:
          # - target-triple: x86_64-unknown-linux-gnu
          #   host-triple: x86_64-unknown-linux-gnu
          #   features: "sqlite,mysql,postgresql"
          #   channel: stable
          #   os: ubuntu-18.04
          #   ext:
          # - target-triple: x86_64-unknown-linux-musl
          #   host-triple: x86_64-unknown-linux-gnu
          #   features: "sqlite,postgresql"
          #   channel: nightly
          #   os: ubuntu-18.04
          #   ext:
          # - target-triple: x86_64-unknown-linux-musl
          #   host-triple: x86_64-unknown-linux-gnu
          #   features: "sqlite,postgresql"
          #   channel: stable
          #   os: ubuntu-18.04
          #   ext:

    name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
    runs-on: ${{ matrix.os }}
    steps:
      # Checkout the repo
      - name: Checkout
        uses: actions/checkout@v2
      # End Checkout the repo


      # Install musl-tools when needed
      - name: Install musl tools
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
        if: matrix.target-triple == 'x86_64-unknown-linux-musl'
      # End Install musl-tools when needed


      # Install dependencies
      - name: Install dependencies Ubuntu
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
        if: startsWith( matrix.os, 'ubuntu' )
      # End Install dependencies


      # Enable Rust Caching
      - uses: Swatinem/rust-cache@v1
      # End Enable Rust Caching


      # Uses the rust-toolchain file to determine version
      - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          target: ${{ matrix.target-triple }}
          components: clippy
      # End Uses the rust-toolchain file to determine version


      # Run cargo tests (in release mode to speed up future builds)
      - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
      # End Run cargo tests


      # Run cargo clippy (in release mode to speed up future builds)
      - name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
      # End Run cargo clippy


      # Build the binary
      - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
      # End Build the binary


      # Upload artifact to GitHub Actions
      - name: Upload artifact
        uses: actions/upload-artifact@v2
        with:
          name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
          path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
      # End Upload artifact to GitHub Actions


      ## This is not used at the moment
      ## We could start using this when we can build static binaries
      # Upload to GitHub Actions release
      # - name: Release
      #   uses: Shopify/upload-to-release@1
      #   if: startsWith(github.ref, 'refs/tags/')
      #   with:
      #     name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
      #     path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
      # End Upload to GitHub Actions release
34 .github/workflows/hadolint.yml (vendored, new file)
@@ -0,0 +1,34 @@
name: Hadolint

on:
  pull_request:
    # Run only when there are changes done to one of these paths
    paths:
      - "docker/**"

jobs:
  hadolint:
    name: Validate Dockerfile syntax
    runs-on: ubuntu-20.04
    steps:
      # Checkout the repo
      - name: Checkout
        uses: actions/checkout@v2
      # End Checkout the repo


      # Download hadolint
      - name: Download hadolint
        shell: bash
        run: |
          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
          sudo chmod +x /usr/local/bin/hadolint
        env:
          HADOLINT_VERSION: 1.19.0
      # End Download hadolint

      # Test Dockerfiles
      - name: Run hadolint
        shell: bash
        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
      # End Test Dockerfiles
148 .github/workflows/workspace.yml (vendored)
@@ -1,148 +0,0 @@
name: Workflow

on:
  push:
    paths-ignore:
      - "**.md"
  #pull_request:
  #  paths-ignore:
  #    - "**.md"

jobs:
  build:
    name: Build
    strategy:
      fail-fast: false
      matrix:
        db-backend: [sqlite, mysql, postgresql]
        target:
          - x86_64-unknown-linux-gnu
          # - x86_64-unknown-linux-musl
          # - x86_64-apple-darwin
          # - x86_64-pc-windows-msvc
        include:
          - target: x86_64-unknown-linux-gnu
            os: ubuntu-latest
            ext:
          # - target: x86_64-unknown-linux-musl
          #   os: ubuntu-latest
          #   ext:
          # - target: x86_64-apple-darwin
          #   os: macOS-latest
          #   ext:
          # - target: x86_64-pc-windows-msvc
          #   os: windows-latest
          #   ext: .exe
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v1

      # - name: Cache choco cache
      #   uses: actions/cache@v1.0.3
      #   if: matrix.os == 'windows-latest'
      #   with:
      #     path: ~\AppData\Local\Temp\chocolatey
      #     key: ${{ runner.os }}-choco-cache-${{ matrix.db-backend }}

      - name: Cache vcpkg installed
        uses: actions/cache@v1.0.3
        if: matrix.os == 'windows-latest'
        with:
          path: $VCPKG_ROOT/installed
          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
        env:
          VCPKG_ROOT: 'C:\vcpkg'

      - name: Cache vcpkg downloads
        uses: actions/cache@v1.0.3
        if: matrix.os == 'windows-latest'
        with:
          path: $VCPKG_ROOT/downloads
          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
        env:
          VCPKG_ROOT: 'C:\vcpkg'

      # - name: Cache homebrew
      #   uses: actions/cache@v1.0.3
      #   if: matrix.os == 'macOS-latest'
      #   with:
      #     path: ~/Library/Caches/Homebrew
      #     key: ${{ runner.os }}-brew-cache

      # - name: Cache apt
      #   uses: actions/cache@v1.0.3
      #   if: matrix.os == 'ubuntu-latest'
      #   with:
      #     path: /var/cache/apt/archives
      #     key: ${{ runner.os }}-apt-cache

      # Install dependencies
      - name: Install dependencies macOS
        run: brew update; brew install openssl sqlite libpq mysql
        if: matrix.os == 'macOS-latest'

      - name: Install dependencies Ubuntu
        run: sudo apt-get update && sudo apt-get install --no-install-recommends openssl sqlite libpq-dev libmysql++-dev
        if: matrix.os == 'ubuntu-latest'

      - name: Install dependencies Windows
        run: vcpkg integrate install; vcpkg install sqlite3:x64-windows openssl:x64-windows libpq:x64-windows libmysql:x64-windows
        if: matrix.os == 'windows-latest'
        env:
          VCPKG_ROOT: 'C:\vcpkg'
      # End Install dependencies

      # Install rust nightly toolchain
      - name: Cache cargo registry
        uses: actions/cache@v1.0.3
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo index
        uses: actions/cache@v1.0.3
        with:
          path: ~/.cargo/git
          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo build
        uses: actions/cache@v1.0.3
        with:
          path: target
          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}

      - name: Install latest nightly
        uses: actions-rs/toolchain@v1.0.5
        with:
          # Uses rust-toolchain to determine version
          profile: minimal
          target: ${{ matrix.target }}

      # Build
      - name: Build Win
        if: matrix.os == 'windows-latest'
        run: cargo.exe build --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
        env:
          RUSTFLAGS: -Ctarget-feature=+crt-static
          VCPKG_ROOT: 'C:\vcpkg'

      - name: Build macOS / Ubuntu
        if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
        run: cargo build --verbose --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}

      # Test
      - name: Run tests
        run: cargo test --features ${{ matrix.db-backend }}

      # Upload & Release
      - name: Upload artifact
        uses: actions/upload-artifact@v1.0.0
        with:
          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}

      - name: Release
        uses: Shopify/upload-to-release@1.0.0
        if: startsWith(github.ref, 'refs/tags/')
        with:
          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
          repo-token: ${{ secrets.GITHUB_TOKEN }}
21 .travis.yml
@@ -1,21 +0,0 @@
dist: xenial

env:
  global:
    - HADOLINT_VERSION=1.17.1

language: rust
rust: nightly
cache: cargo

before_install:
  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
  - sudo chmod +rx /usr/local/bin/hadolint
  - rustup set profile minimal

# Nothing to install
install: true
script:
  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
  - cargo test --features "sqlite"
  - cargo test --features "mysql"
1952 Cargo.lock (generated)
File diff suppressed because it is too large.
86 Cargo.toml
@@ -16,6 +16,12 @@ enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]

# Enable unstable features, requires nightly
# Currently only used to enable Rust's official IP support
unstable = []

[target."cfg(not(windows))".dependencies]
syslog = "4.0.1"
@@ -26,108 +32,106 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev"

# HTTP client
reqwest = { version = "0.10.4", features = ["blocking", "json"] }
reqwest = { version = "0.11.2", features = ["blocking", "json"] }

# multipart/form-data support
multipart = { version = "0.16.1", features = ["server"], default-features = false }
multipart = { version = "0.17.1", features = ["server"], default-features = false }

# WebSockets library
ws = "0.9.1"
ws = { version = "0.10.0", package = "parity-ws" }

# MessagePack library
rmpv = "0.4.4"
rmpv = "0.4.7"

# Concurrent hashmap implementation
chashmap = "2.2.2"

# A generic serialization/deserialization framework
serde = "1.0.106"
serde_derive = "1.0.106"
serde_json = "1.0.51"
serde = { version = "1.0.125", features = ["derive"] }
serde_json = "1.0.64"

# Logging
log = "0.4.8"
log = "0.4.14"
fern = { version = "0.6.0", features = ["syslog-4"] }

# A safe, extensible ORM and Query builder
diesel = { version = "1.4.4", features = [ "chrono", "r2d2"] }
diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"

# Bundled SQLite
libsqlite3-sys = { version = "0.17.3", features = ["bundled"], optional = true }
libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }

# Crypto library
ring = "0.16.12"
# Crypto-related libraries
rand = "0.8.3"
ring = "0.16.20"

# UUID generation
uuid = { version = "0.8.1", features = ["v4"] }
uuid = { version = "0.8.2", features = ["v4"] }

# Date and time librar for Rust
chrono = "0.4.11"
time = "0.2.9"
# Date and time libraries
chrono = { version = "0.4.19", features = ["serde"] }
chrono-tz = "0.5.3"
time = "0.2.26"

# TOTP library
oath = "0.10.2"

# Data encoding library
data-encoding = "2.2.0"
data-encoding = "2.3.2"

# JWT library
jsonwebtoken = "7.1.0"
jsonwebtoken = "7.2.0"

# U2F library
u2f = "0.2.0"

# Yubico Library
yubico = { version = "0.9.0", features = ["online-tokio"], default-features = false }
yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }

# A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false }

# Lazy initialization
once_cell = "1.3.1"

# More derives
derive_more = "0.99.5"
once_cell = "1.7.2"

# Numerical libraries
num-traits = "0.2.11"
num-derive = "0.3.0"
num-traits = "0.2.14"
num-derive = "0.3.3"

# Email libraries
lettre = "0.10.0-pre"
native-tls = "0.2.4"
quoted_printable = "0.4.2"
lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
newline-converter = "0.2.0"

# Template library
handlebars = { version = "3.0.1", features = ["dir_source"] }
handlebars = { version = "3.5.3", features = ["dir_source"] }

# For favicon extraction from main website
soup = "0.5.0"
regex = "1.3.6"
html5ever = "0.25.1"
markup5ever_rcdom = "0.1.0"
regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
data-url = "0.1.0"

# Used by U2F, JWT and Postgres
openssl = "0.10.29"
openssl = "0.10.33"

# URL encoding library
percent-encoding = "2.1.0"
# Punycode conversion
idna = "0.2.0"
idna = "0.2.2"

# CLI argument parsing
structopt = "0.3.13"
pico-args = "0.4.0"

# Logging panics to logfile instead stderr only
backtrace = "0.3.46"
backtrace = "0.3.56"

# Macro ident concatenation
paste = "1.0.5"

[patch.crates-io]
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' }

# Use git version for timeout fix #706
lettre = { git = 'https://github.com/lettre/lettre', rev = '245c600c82ee18b766e8729f005ff453a55dce34' }
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }

# For favicon extraction from main website
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' }
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
@@ -1 +1 @@
docker/amd64/sqlite/Dockerfile
docker/amd64/Dockerfile
@@ -21,7 +21,6 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani

Basically, a full implementation of the Bitwarden API is provided, including:

* Single user functionality
* Organizations support
* Attachments
* Vault API support
@@ -59,3 +58,4 @@ If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org
Thanks for your contribution to the project!

- [@ChonoN](https://github.com/ChonoN)
- [@themightychris](https://github.com/themightychris)
@@ -1,5 +1,5 @@
pool:
  vmImage: 'Ubuntu-16.04'
  vmImage: 'Ubuntu-18.04'

steps:
- script: |
@@ -10,16 +10,13 @@ steps:

- script: |
    sudo apt-get update
    sudo apt-get install -y libmysql++-dev
  displayName: Install libmysql
    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
  displayName: 'Install build libraries.'

- script: |
    rustc -Vv
    cargo -V
  displayName: Query rust and cargo versions

- script: cargo test --features "sqlite"
  displayName: 'Test project with sqlite backend'

- script: cargo test --features "mysql"
  displayName: 'Test project with mysql backend'
- script: cargo test --features "sqlite,mysql,postgresql"
  displayName: 'Test project with sqlite, mysql and postgresql backends'
13 build.rs
@@ -2,12 +2,13 @@ use std::process::Command;
use std::env;

fn main() {
    #[cfg(all(feature = "sqlite", feature = "mysql"))]
    compile_error!("Can't enable both sqlite and mysql at the same time");
    #[cfg(all(feature = "sqlite", feature = "postgresql"))]
    compile_error!("Can't enable both sqlite and postgresql at the same time");
    #[cfg(all(feature = "mysql", feature = "postgresql"))]
    compile_error!("Can't enable both mysql and postgresql at the same time");
    // This allows using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
    #[cfg(feature = "sqlite")]
    println!("cargo:rustc-cfg=sqlite");
    #[cfg(feature = "mysql")]
    println!("cargo:rustc-cfg=mysql");
    #[cfg(feature = "postgresql")]
    println!("cargo:rustc-cfg=postgresql");

    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
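To illustrate the `cargo:rustc-cfg` lines this diff adds: once the build script prints them, code elsewhere in the crate can gate on the short cfg names. A minimal sketch (hypothetical, not taken from the repo), with each cfg kept independent so several backends can be compiled in at once:

```rust
fn main() {
    // Active only when build.rs emitted "cargo:rustc-cfg=sqlite",
    // i.e. the crate was built with `--features sqlite`.
    #[cfg(sqlite)]
    println!("sqlite backend compiled in");

    #[cfg(mysql)]
    println!("mysql backend compiled in");

    #[cfg(postgresql)]
    println!("postgresql backend compiled in");
}
```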
33 docker/Dockerfile.buildx (new file)
@@ -0,0 +1,33 @@
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
#   $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
#   amd64
#
# Recent versions of Docker have started printing a warning when the image's
# claimed arch doesn't match the host arch. For example:
#
#   WARNING: The requested image's platform (linux/amd64) does not match the
#   detected host platform (linux/arm/v7) and no specific platform was requested
#
# The image still works fine, but the spurious warning creates confusion.
#
# Docker doesn't seem to provide a way to directly set the arch of an image
# at build time. To resolve the build vs. target arch discrepancy, we use
# Docker Buildx to build a new set of images with the correct target arch.
#
# Docker Buildx uses this Dockerfile to build an image for each requested
# platform. Since the Dockerfile basically consists of a single `FROM`
# instruction, we're effectively telling Buildx to build a platform-specific
# image by simply copying the existing cross-built image and setting the
# correct target arch as a side effect.
#
# References:
#
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
#
ARG LOCAL_REPO
ARG DOCKER_TAG
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
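For context, a sketch of how this Dockerfile might be invoked; the actual commands live in the repo's build hooks (not shown in this diff), so the tag and platform below are illustrative only:

```bash
# Rebuild the cross-built armv7 image with correct platform metadata.
# With --platform linux/arm/v7, Buildx sets TARGETARCH=arm and
# TARGETVARIANT=v7, so the FROM line above resolves to
# bitwardenrs/server:latest-armv7.
docker buildx build \
  --platform linux/arm/v7 \
  --build-arg LOCAL_REPO=bitwardenrs/server \
  --build-arg DOCKER_TAG=latest \
  -f docker/Dockerfile.buildx \
  -t bitwardenrs/server:latest-armv7 \
  --push .
```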
@@ -1,68 +1,89 @@
|
||||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||
|
||||
{% set build_stage_base_image = "rust:1.40" %}
|
||||
{% set build_stage_base_image = "rust:1.50" %}
|
||||
{% if "alpine" in target_file %}
|
||||
{% set build_stage_base_image = "clux/muslrust:nightly-2020-03-09" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.11" %}
|
||||
{% set package_arch_name = "" %}
|
||||
{% if "amd64" in target_file %}
|
||||
{% set build_stage_base_image = "clux/muslrust:nightly-2021-02-22" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.13" %}
|
||||
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
||||
{% endif %}
|
||||
{% elif "amd64" in target_file %}
|
||||
{% set runtime_stage_base_image = "debian:buster-slim" %}
|
||||
{% set package_arch_name = "" %}
|
||||
{% elif "aarch64" in target_file %}
|
||||
{% elif "arm64" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
|
||||
{% set package_arch_name = "arm64" %}
|
||||
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
|
||||
{% set package_cross_compiler = "aarch64-linux-gnu" %}
|
||||
{% elif "armv6" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
|
||||
{% set package_arch_name = "armel" %}
|
||||
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabi" %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
|
||||
{% set package_arch_name = "armhf" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
|
||||
{% endif %}
|
||||
{% if package_arch_name is defined %}
|
||||
{% set package_arch_prefix = ":" + package_arch_name %}
|
||||
{% if package_arch_name == "" %}
|
||||
{% else %}
|
||||
{% set package_arch_prefix = "" %}
|
||||
{% endif %}
|
||||
{% if package_arch_target is defined %}
|
||||
{% set package_arch_target_param = " --target=" + package_arch_target %}
|
||||
{% else %}
|
||||
{% set package_arch_target_param = "" %}
|
||||
{% endif %}
|
||||
# Using multistage build:
|
||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||
####################### VAULT BUILD IMAGE #######################
|
||||
{% set vault_image_hash = "sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554" %}
|
||||
{% raw %}
|
||||
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
|
||||
# It can be viewed in multiple ways:
|
||||
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
|
||||
# - From the console, with the following commands:
|
||||
# docker pull bitwardenrs/web-vault:v2.13.2b
|
||||
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
|
||||
{% set vault_version = "2.19.0" %}
|
||||
{% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %}
|
||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||
# Using the digest instead of the tag name provides better security,
|
||||
# as the digest of an image is immutable, whereas a tag name can later
|
||||
# be changed to point to a malicious image.
|
||||
#
|
||||
# - To do the opposite, and get the tag from the hash, you can do:
|
||||
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
|
||||
{% endraw %}
|
||||
FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
|
||||
# To verify the current digest for a given tag name:
|
||||
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
|
||||
# click the tag name to view the digest of the image it currently points to.
|
||||
# - From the command line:
|
||||
# $ docker pull bitwardenrs/web-vault:v{{ vault_version }}
|
||||
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" bitwardenrs/web-vault:v{{ vault_version }}
|
||||
# [bitwardenrs/web-vault@{{ vault_image_digest }}]
|
||||
#
|
||||
# - Conversely, to get the tag name from the digest:
|
||||
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" bitwardenrs/web-vault@{{ vault_image_digest }}
|
||||
# [bitwardenrs/web-vault:v{{ vault_version }}]
|
||||
#
|
||||
FROM bitwardenrs/web-vault@{{ vault_image_digest }} as vault
|
||||
|
||||
########################## BUILD IMAGE ##########################
|
||||
{% if "musl" in build_stage_base_image %}
|
||||
# Musl build image for statically compiled binary
|
||||
{% else %}
|
||||
# We need to use the Rust build image, because
|
||||
# we need the Rust compiler and Cargo tooling
|
||||
{% endif %}
|
||||
FROM {{ build_stage_base_image }} as build
|
||||
|
||||
{% if "sqlite" in target_file %}
|
||||
# set sqlite as default for DB ARG for backward compatibility
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "amd64" in target_file %}
|
||||
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
|
||||
ARG DB=sqlite,postgresql
|
||||
{% set features = "sqlite,postgresql" %}
|
||||
{% else %}
|
||||
# Alpine-based ARM (musl) only supports sqlite during compile time.
|
||||
ARG DB=sqlite
|
||||
|
||||
{% elif "mysql" in target_file %}
|
||||
# set mysql backend
|
||||
ARG DB=mysql
|
||||
|
||||
{% elif "postgresql" in target_file %}
|
||||
# set postgresql backend
|
||||
ARG DB=postgresql
|
||||
|
||||
{% set features = "sqlite" %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Debian-based builds support multidb
|
||||
ARG DB=sqlite,mysql,postgresql
|
||||
{% set features = "sqlite,mysql,postgresql" %}
|
||||
{% endif %}
|
||||
|
||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
|
||||
|
||||
@@ -72,9 +93,12 @@ RUN rustup set profile minimal
|
||||
{% if "alpine" in target_file %}
|
||||
ENV USER "root"
|
||||
ENV RUSTFLAGS='-C link-arg=-s'
|
||||
|
||||
{% elif "aarch64" in target_file or "armv" in target_file %}
|
||||
{% if "armv7" in target_file %}
|
||||
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
|
||||
{% endif %}
|
||||
{% elif "arm" in target_file %}
|
||||
# Install required build libs for {{ package_arch_name }} architecture.
|
||||
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
|
||||
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
|
||||
/etc/apt/sources.list.d/deb-src.list \
|
||||
&& dpkg --add-architecture {{ package_arch_name }} \
|
||||
@@ -82,77 +106,34 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
libssl-dev{{ package_arch_prefix }} \
|
||||
libc6-dev{{ package_arch_prefix }}
|
||||
|
||||
{% endif -%}
|
||||
{% if "aarch64" in target_file %}
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc-aarch64-linux-gnu \
|
||||
&& mkdir -p ~/.cargo \
|
||||
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
|
||||
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
|
||||
|
||||
ENV CARGO_HOME "/root/.cargo"
|
||||
ENV USER "root"
|
||||
|
||||
{% elif "armv6" in target_file %}
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc-arm-linux-gnueabi \
|
||||
&& mkdir -p ~/.cargo \
|
||||
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
|
||||
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
|
||||
|
||||
ENV CARGO_HOME "/root/.cargo"
|
||||
ENV USER "root"
|
||||
|
||||
{% elif "armv6" in target_file %}
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc-arm-linux-gnueabihf \
|
||||
&& mkdir -p ~/.cargo \
|
||||
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
|
||||
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
|
||||
|
||||
ENV CARGO_HOME "/root/.cargo"
|
||||
ENV USER "root"
|
||||
|
||||
{% elif "armv7" in target_file %}
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc-arm-linux-gnueabihf \
|
||||
&& mkdir -p ~/.cargo \
|
||||
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
|
||||
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
|
||||
|
||||
ENV CARGO_HOME "/root/.cargo"
|
||||
ENV USER "root"
|
||||
|
||||
{% endif %}
|
||||
{% if "mysql" in target_file %}
|
||||
# Install MySQL package
|
||||
RUN apt-get update && apt-get install -y \
|
||||
--no-install-recommends \
|
||||
{% if "musl" in build_stage_base_image %}
|
||||
libmysqlclient-dev{{ package_arch_prefix }} \
|
||||
{% else %}
|
||||
libc6-dev{{ package_arch_prefix }} \
|
||||
libpq5{{ package_arch_prefix }} \
|
||||
libpq-dev \
|
||||
libmariadb-dev{{ package_arch_prefix }} \
|
||||
{% endif %}
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
libmariadb-dev-compat{{ package_arch_prefix }}
|
||||
|
||||
{% elif "postgresql" in target_file %}
|
||||
# Install PostgreSQL package
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc-{{ package_cross_compiler }} \
|
||||
&& mkdir -p ~/.cargo \
|
||||
&& echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
|
||||
&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
|
||||
&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
|
||||
|
||||
ENV CARGO_HOME "/root/.cargo"
|
||||
ENV USER "root"
|
||||
{% endif -%}
|
||||
|
||||
{% if "amd64" in target_file and "alpine" not in target_file %}
|
||||
# Install DB packages
|
||||
RUN apt-get update && apt-get install -y \
|
||||
--no-install-recommends \
|
||||
libmariadb-dev{{ package_arch_prefix }} \
|
||||
libpq-dev{{ package_arch_prefix }} \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
{% endif %}
|
||||
|
||||
# Creates a dummy project used to grab dependencies
|
||||
RUN USER=root cargo new --bin /app
|
||||
WORKDIR /app
|
||||
@@ -162,39 +143,38 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

{% if "aarch64" in target_file %}
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
{% if "alpine" not in target_file %}
{% if "arm" in target_file %}
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so

ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
{% elif "armv6" in target_file %}
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
{% elif "armv7" in target_file %}
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% endif -%}

{% if "alpine" in target_file %}
RUN rustup target add x86_64-unknown-linux-musl

{% elif "aarch64" in target_file %}
RUN rustup target add aarch64-unknown-linux-gnu

{% elif "armv6" in target_file %}
RUN rustup target add arm-unknown-linux-gnueabi

{% elif "armv7" in target_file %}
RUN rustup target add armv7-unknown-linux-gnueabihf
{% endif %}
{% if package_arch_target is defined %}
RUN rustup target add {{ package_arch_target }}
{% endif %}

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
RUN find . -not -path "./target*" -delete

# Copies the complete project
@@ -206,14 +186,11 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
{% if "amd64" in target_file %}
RUN cargo build --features ${DB} --release
{% elif "aarch64" in target_file %}
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
{% elif "armv6" in target_file %}
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
{% elif "armv7" in target_file %}
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
{% if "armv7" in target_file %}
RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
{% endif %}
{% endif %}

######################## RUNTIME IMAGE ########################
@@ -237,11 +214,14 @@ RUN [ "cross-build-start" ]
RUN apk add --no-cache \
openssl \
curl \
{% if "sqlite" in target_file %}
dumb-init \
{% if "sqlite" in features %}
sqlite \
{% elif "mysql" in target_file %}
{% endif %}
{% if "mysql" in features %}
mariadb-connector-c \
{% elif "postgresql" in target_file %}
{% endif %}
{% if "postgresql" in features %}
postgresql-libs \
{% endif %}
ca-certificates
@@ -251,13 +231,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
{% if "sqlite" in target_file %}
dumb-init \
sqlite3 \
{% elif "mysql" in target_file %}
libmariadbclient-dev \
{% elif "postgresql" in target_file %}
libmariadb-dev-compat \
libpq5 \
{% endif %}
&& rm -rf /var/lib/apt/lists/*
{% endif %}

@@ -275,22 +252,18 @@ EXPOSE 3012
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
{% if "alpine" in target_file %}
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
{% elif "aarch64" in target_file %}
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
{% elif "armv6" in target_file %}
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
{% elif "armv7" in target_file %}
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
{% if package_arch_target is defined %}
COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
{% else %}
COPY --from=build app/target/release/bitwarden_rs .
COPY --from=build /app/target/release/bitwarden_rs .
{% endif %}

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -1,4 +1,4 @@
OBJECTS := $(shell find -mindepth 2 -name 'Dockerfile*')
OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')

all: $(OBJECTS)
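
For context on the Makefile hunk above: the OBJECTS list typically feeds a pattern rule that re-renders each generated Dockerfile from the shared Dockerfile.j2. A minimal sketch, assuming the repo's render_template helper script; the exact recipe is not part of this diff, so treat the rule below as an illustration, not the repository's verbatim Makefile:

# Sketch (assumption): regenerate every Dockerfile found by OBJECTS from the
# shared Jinja2 template, passing the output path as target_file so the
# template's {% if ... in target_file %} conditionals pick the right variant.
%: Dockerfile.j2 render_template
	./render_template Dockerfile.j2 "{\"target_file\": \"$@\"}" > "$@"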

docker/README.md (new file)
@@ -0,0 +1,3 @@
The arch-specific directory names follow the arch identifiers used by the Docker official images:

https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64
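
For quick reference, a few of the architecture identifiers defined in that linked README (the authoritative, full list lives at the URL above; which subset this repo uses as directory names is not restated here):

#   amd64, arm32v5, arm32v6, arm32v7, arm64v8, i386, ppc64le, s390x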

@@ -1,133 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for arm64 architecture.
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64

RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev:arm64 \
&& rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
FROM rust:1.50 as build

# set postgresql backend
ARG DB=postgresql
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -31,9 +36,10 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal

# Install PostgreSQL package
# Install DB packages
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*

@@ -46,6 +52,7 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs


# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -78,6 +85,9 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*

@@ -90,12 +100,14 @@ EXPOSE 3012
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY --from=build /app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -1,28 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2020-03-09 as build
FROM clux/muslrust:nightly-2021-02-22 as build

# set postgresql backend
ARG DB=postgresql
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -33,12 +39,6 @@ RUN rustup set profile minimal
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'

# Install PostgreSQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -53,7 +53,7 @@ RUN rustup target add x86_64-unknown-linux-musl
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
RUN find . -not -path "./target*" -delete

# Copies the complete project
@@ -65,12 +65,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11
FROM alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -81,6 +81,8 @@ ENV SSL_CERT_DIR=/etc/ssl/certs
RUN apk add --no-cache \
openssl \
curl \
dumb-init \
sqlite \
postgresql-libs \
ca-certificates

@@ -96,9 +98,11 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -1,101 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
&& rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

@@ -1,104 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2020-03-09 as build

# set mysql backend
ARG DB=mysql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'

# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmysqlclient-dev \
&& rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
openssl \
curl \
mariadb-connector-c \
ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

@@ -1,95 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
FROM rust:1.50 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal

# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64
libc6-dev:arm64 \
libpq5:arm64 \
libpq-dev \
libmariadb-dev:arm64 \
libmariadb-dev-compat:arm64

RUN apt-get update \
&& apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
@@ -61,6 +72,22 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
@@ -70,7 +97,7 @@ RUN rustup target add aarch64-unknown-linux-gnu
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
RUN find . -not -path "./target*" -delete

# Copies the complete project
@@ -101,7 +128,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data
@@ -119,9 +149,11 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
FROM rust:1.50 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal

# Install required build libs for armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel
libc6-dev:armel \
libpq5:armel \
libpq-dev \
libmariadb-dev:armel \
libmariadb-dev-compat:armel

RUN apt-get update \
&& apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
@@ -61,6 +72,22 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
@@ -70,7 +97,7 @@ RUN rustup target add arm-unknown-linux-gnueabi
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
RUN find . -not -path "./target*" -delete

# Copies the complete project
@@ -101,7 +128,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data
@@ -119,9 +149,11 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -1,133 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armel architecture.
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel

RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev:armel \
&& rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
RUN rustup target add arm-unknown-linux-gnueabi

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.13.2b
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
FROM rust:1.50 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal

# Install required build libs for armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf
libc6-dev:armhf \
libpq5:armhf \
libpq-dev \
libmariadb-dev:armhf \
libmariadb-dev-compat:armhf

RUN apt-get update \
&& apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
@@ -61,15 +72,32 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
@@ -100,7 +128,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*

RUN mkdir /data
@@ -118,9 +149,11 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||
|
||||
# Configures the startup!
|
||||
WORKDIR /
|
||||
CMD ["/bitwarden_rs"]
|
||||
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
|
||||
CMD ["/start.sh"]
|
@@ -1,27 +1,33 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# 	https://docs.docker.com/develop/develop-images/multistage-build/
# 	https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#     docker pull bitwardenrs/web-vault:v2.13.2b
#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2020-03-09 as build
FROM messense/rust-musl-cross:armv7-musleabihf as build

# set sqlite as default for DB ARG for backward compatibility
# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite

# Build time options to avoid dpkg warnings and help with reproducible builds.
@@ -32,6 +38,7 @@ RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -42,12 +49,12 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl
RUN rustup target add armv7-unknown-linux-musleabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
@@ -59,26 +66,33 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11
FROM balenalib/armv7hf-alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    dumb-init \
    sqlite \
    ca-certificates

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012
@@ -87,12 +101,14 @@ EXPOSE 3012
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,132 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# 	https://docs.docker.com/develop/develop-images/multistage-build/
# 	https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#     docker pull bitwardenrs/web-vault:v2.13.2b
#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
#
# - To do the opposite, and get the tag from the hash, you can do:
#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armhf architecture.
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev:armhf \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
docker/start.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/sh

if [ -r /etc/bitwarden_rs.sh ]; then
    . /etc/bitwarden_rs.sh
fi

if [ -d /etc/bitwarden_rs.d ]; then
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
        fi
    done
fi

exec /bitwarden_rs "${@}"
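The start script sources optional pre-start hooks before handing off to the server, so deployment-specific environment can be dropped in without rebuilding the image. A minimal sketch of such a drop-in (the file name and values here are hypothetical examples; the SMTP_* names are standard bitwarden_rs settings):

#!/bin/sh
# /etc/bitwarden_rs.d/10-smtp.sh -- sourced by /start.sh before it execs the server
export SMTP_HOST=mail.example.com
export SMTP_FROM=vault@example.com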
hooks/README.md (new file, 20 lines)
@@ -0,0 +1,20 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
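As a minimal sketch of how a hook consumes these variables (illustrative only; the real hooks/build script below layers arch iteration and OCI labels on top of the same idea):

#!/bin/bash
# Docker Hub runs hooks from the checkout root with the variables above set.
echo "Building ${IMAGE_NAME} from ${DOCKERFILE_PATH} at commit ${SOURCE_COMMIT}"
docker build -t "${DOCKER_REPO}:${DOCKER_TAG}" -f "${DOCKERFILE_PATH}" .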
hooks/arches.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    # The Alpine image build currently only works for certain arches.
    distro_suffix=.alpine
    arches=(
        amd64
        armv7
    )
fi
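The file is meant to be sourced rather than executed; a sketch of how the other hooks use it (the tag value is illustrative):

export DOCKER_TAG=1.20.0-alpine
source ./hooks/arches.sh
echo "${arches[@]}" "${distro_suffix}"   # prints: amd64 armv7 .alpine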
hooks/build (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/bash

echo ">>> Building images..."

source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/bitwarden_rs/wiki"
    org.opencontainers.image.licenses="GPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/Dockerfile${distro_suffix} \
        .
done
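Because the script falls back to `git rev-parse HEAD` when SOURCE_COMMIT is unset, it can also be exercised outside Docker Hub. A sketch of a local invocation, with illustrative repo and tag values:

DOCKER_REPO=index.docker.io/example/bitwarden_rs DOCKER_TAG=testing ./hooks/build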
hooks/pre_build (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
hooks/push (new executable file, 138 lines)
@@ -0,0 +1,138 @@
#!/bin/bash

source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }
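# Usage example: join "," a b c   ->   a,b,c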
set -ex

echo ">>> Starting local Docker registry..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
docker run -d --name registry --network host registry:2  # defaults to port 5000
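# If a sanity check is wanted here, the registry's catalog endpoint can be
# queried (illustrative; not required by the build):
#   curl -s http://localhost:5000/v2/_catalog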
# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in ${arches[@]}; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
docker buildx create --name builder --use --driver-opt network=host

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=(${DOCKER_REPO}:alpine)
    else
        tags+=(${DOCKER_REPO}:latest)
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in ${arches[@]}; do
    platforms+=("${arch_to_platform[$arch]}")
done
platforms="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platforms}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx

# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing bitwarden_rs tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
#
# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
# so this step can be removed once fixed versions are in wider distribution.
#
# Tags:
#
#   testing        => testing-arm32v6
#   testing-alpine => <ignored>
#   x.y.z          => x.y.z-arm32v6, latest-arm32v6
#   x.y.z-alpine   => <ignored>
#
if [[ "${DOCKER_TAG}" != *alpine ]]; then
    image="${DOCKER_REPO}":"${DOCKER_TAG}"

    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"

    # Pull the armv6 image by digest, retag it, and repush it.
    docker pull "${DOCKER_REPO}"@"${digest}"
    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
    docker push "${image}"-arm32v6

    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
        docker push "${DOCKER_REPO}:latest"-arm32v6
    fi
fi
@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at DATETIME;
@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;
@@ -0,0 +1,16 @@
CREATE TABLE favorites (
    user_uuid   CHAR(36) NOT NULL REFERENCES users(uuid),
    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
  AND user_uuid IS NOT NULL;

ALTER TABLE ciphers
DROP COLUMN favorite;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
migrations/mysql/2021-03-11-190243_add_sends/down.sql (new file, 1 line)
@@ -0,0 +1 @@
DROP TABLE sends;
migrations/mysql/2021-03-11-190243_add_sends/up.sql (new file, 25 lines)
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid         CHAR(36) REFERENCES users (uuid),
    organization_uuid CHAR(36) REFERENCES organizations (uuid),

    name              TEXT     NOT NULL,
    notes             TEXT,

    atype             INTEGER  NOT NULL,
    data              TEXT     NOT NULL,
    akey              TEXT     NOT NULL,
    password_hash     BLOB,
    password_salt     BLOB,
    password_iter     INTEGER,

    max_access_count  INTEGER,
    access_count      INTEGER  NOT NULL,

    creation_date     DATETIME NOT NULL,
    revision_date     DATETIME NOT NULL,
    expiration_date   DATETIME,
    deletion_date     DATETIME NOT NULL,

    disabled          BOOLEAN  NOT NULL
);
@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at TIMESTAMP;
@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;
@@ -0,0 +1,16 @@
CREATE TABLE favorites (
    user_uuid   VARCHAR(40) NOT NULL REFERENCES users(uuid),
    cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
  AND user_uuid IS NOT NULL;

ALTER TABLE ciphers
DROP COLUMN favorite;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -0,0 +1 @@
DROP TABLE sends;
migrations/postgresql/2021-03-11-190243_add_sends/up.sql (new file, 25 lines)
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid         CHAR(36) REFERENCES users (uuid),
    organization_uuid CHAR(36) REFERENCES organizations (uuid),

    name              TEXT      NOT NULL,
    notes             TEXT,

    atype             INTEGER   NOT NULL,
    data              TEXT      NOT NULL,
    key               TEXT      NOT NULL,
    password_hash     BYTEA,
    password_salt     BYTEA,
    password_iter     INTEGER,

    max_access_count  INTEGER,
    access_count      INTEGER   NOT NULL,

    creation_date     TIMESTAMP NOT NULL,
    revision_date     TIMESTAMP NOT NULL,
    expiration_date   TIMESTAMP,
    deletion_date     TIMESTAMP NOT NULL,

    disabled          BOOLEAN   NOT NULL
);
@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;
@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at DATETIME;
@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT 0; -- FALSE
@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT 0; -- FALSE

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = 1
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;
@@ -0,0 +1,71 @@
CREATE TABLE favorites (
    user_uuid   TEXT NOT NULL REFERENCES users(uuid),
    cipher_uuid TEXT NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = 1
  AND user_uuid IS NOT NULL;

-- Drop the `favorite` column from the `ciphers` table, using the 12-step
-- procedure from <https://www.sqlite.org/lang_altertable.html#altertabrename>.
-- Note that some steps aren't applicable and are omitted.

-- 1. If foreign key constraints are enabled, disable them using PRAGMA foreign_keys=OFF.
--
-- Diesel runs each migration in its own transaction. `PRAGMA foreign_keys`
-- is a no-op within a transaction, so this step must be done outside of this
-- file, before starting the Diesel migrations.

-- 2. Start a transaction.
--
-- Diesel already runs each migration in its own transaction.

-- 4. Use CREATE TABLE to construct a new table "new_X" that is in the
-- desired revised format of table X. Make sure that the name "new_X" does
-- not collide with any existing table name, of course.

CREATE TABLE new_ciphers(
    uuid              TEXT     NOT NULL PRIMARY KEY,
    created_at        DATETIME NOT NULL,
    updated_at        DATETIME NOT NULL,
    user_uuid         TEXT     REFERENCES users(uuid),
    organization_uuid TEXT     REFERENCES organizations(uuid),
    atype             INTEGER  NOT NULL,
    name              TEXT     NOT NULL,
    notes             TEXT,
    fields            TEXT,
    data              TEXT     NOT NULL,
    password_history  TEXT,
    deleted_at        DATETIME
);

-- 5. Transfer content from X into new_X using a statement like:
--    INSERT INTO new_X SELECT ... FROM X.

INSERT INTO new_ciphers(uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
                        name, notes, fields, data, password_history, deleted_at)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
       name, notes, fields, data, password_history, deleted_at
FROM ciphers;

-- 6. Drop the old table X: DROP TABLE X.

DROP TABLE ciphers;

-- 7. Change the name of new_X to X using: ALTER TABLE new_X RENAME TO X.

ALTER TABLE new_ciphers RENAME TO ciphers;

-- 11. Commit the transaction started in step 2.

-- 12. If foreign key constraints were originally enabled, reenable them now.
--
-- `PRAGMA foreign_keys` is scoped to a database connection, and Diesel
-- migrations are run in a separate database connection that is closed once
-- the migrations finish.
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
migrations/sqlite/2021-03-11-190243_add_sends/down.sql (new file, 1 line)
@@ -0,0 +1 @@
DROP TABLE sends;
migrations/sqlite/2021-03-11-190243_add_sends/up.sql (new file, 25 lines)
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              TEXT     NOT NULL PRIMARY KEY,
    user_uuid         TEXT     REFERENCES users (uuid),
    organization_uuid TEXT     REFERENCES organizations (uuid),

    name              TEXT     NOT NULL,
    notes             TEXT,

    atype             INTEGER  NOT NULL,
    data              TEXT     NOT NULL,
    key               TEXT     NOT NULL,
    password_hash     BLOB,
    password_salt     BLOB,
    password_iter     INTEGER,

    max_access_count  INTEGER,
    access_count      INTEGER  NOT NULL,

    creation_date     DATETIME NOT NULL,
    revision_date     DATETIME NOT NULL,
    expiration_date   DATETIME,
    deletion_date     DATETIME NOT NULL,

    disabled          BOOLEAN  NOT NULL
);
@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;
rust-toolchain
@@ -1 +1 @@
nightly-2020-04-08
nightly-2021-02-22
src/api/admin.rs (443 changed lines)
@@ -1,20 +1,27 @@
use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::process::Command;
use std::{env, process::Command, time::Duration};

use rocket::http::{Cookie, Cookies, SameSite};
use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
use rocket::response::{content::Html, Flash, Redirect};
use rocket::{Outcome, Route};
use reqwest::{blocking::Client, header::USER_AGENT};
use rocket::{
    http::{Cookie, Cookies, SameSite},
    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
    response::{content::Html, Flash, Redirect},
    Route,
};
use rocket_contrib::json::Json;

use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
use crate::config::ConfigBuilder;
use crate::db::{backup_database, models::*, DbConn};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;
use crate::{
    api::{ApiResult, EmptyResult, NumberOrString},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
    error::{Error, MapResult},
    mail,
    util::{format_naive_datetime_local, get_display_size, is_running_in_docker},
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
@@ -23,24 +30,46 @@ pub fn routes() -> Vec<Route> {

    routes![
        admin_login,
        get_users,
        get_users_json,
        post_admin_login,
        admin_page,
        invite_user,
        logout,
        delete_user,
        deauth_user,
        disable_user,
        enable_user,
        remove_2fa,
        update_user_org_type,
        update_revision_users,
        post_config,
        delete_config,
        backup_db,
        test_smtp,
        users_overview,
        organizations_overview,
        delete_organization,
        diagnostics,
        get_diagnostics_config
    ]
}

static CAN_BACKUP: Lazy<bool> =
    Lazy::new(|| cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok());
static DB_TYPE: Lazy<&str> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| match t {
            DbConnType::sqlite => "SQLite",
            DbConnType::mysql => "MySQL",
            DbConnType::postgresql => "PostgreSQL",
        })
        .unwrap_or("Unknown")
});

static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| t == DbConnType::sqlite)
        .unwrap_or(false)
        && Command::new("sqlite3").arg("-version").status().is_ok()
});

#[get("/")]
fn admin_disabled() -> &'static str {
@@ -57,6 +86,58 @@ fn admin_path() -> String {
    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
}

struct Referer(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for Referer {
    type Error = ();

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
    }
}

#[derive(Debug)]
struct IpHeader(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
    type Error = ();

    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        if req.headers().get_one(&CONFIG.ip_header()).is_some() {
            Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
        } else if req.headers().get_one("X-Client-IP").is_some() {
            Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
        } else if req.headers().get_one("X-Real-IP").is_some() {
            Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
        } else if req.headers().get_one("X-Forwarded-For").is_some() {
            Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
        } else {
            Outcome::Success(IpHeader(None))
        }
    }
}

/// Used for `Location` response headers, which must specify an absolute URI
/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
fn admin_url(referer: Referer) -> String {
    // If we get a referer, use that to make it work when DOMAIN is not set
    if let Some(mut referer) = referer.0 {
        if let Some(start_index) = referer.find(ADMIN_PATH) {
            referer.truncate(start_index + ADMIN_PATH.len());
            return referer;
        }
    }

    if CONFIG.domain_set() {
        // Don't use CONFIG.domain() directly, since the user may want to keep a
        // trailing slash there, particularly when running under a subpath.
        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
    } else {
        // Last case, when no referer or domain is set; technically invalid but better than nothing
        ADMIN_PATH.to_string()
    }
}

#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
    // If there is an error, show it
@@ -74,14 +155,19 @@ struct LoginForm {
}

#[post("/", data = "<data>")]
fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
fn post_admin_login(
    data: Form<LoginForm>,
    mut cookies: Cookies,
    ip: ClientIp,
    referer: Referer,
) -> Result<Redirect, Flash<Redirect>> {
    let data = data.into_inner();

    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(
            Redirect::to(admin_path()),
            Redirect::to(admin_url(referer)),
            "Invalid admin token, please try again.",
        ))
    } else {
@@ -97,7 +183,7 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
            .finish();

        cookies.add(cookie);
        Ok(Redirect::to(admin_path()))
        Ok(Redirect::to(admin_url(referer)))
    }
}

@@ -112,7 +198,9 @@ fn _validate_token(token: &str) -> bool {
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    users: Vec<Value>,
    users: Option<Vec<Value>>,
    organizations: Option<Vec<Value>>,
    diagnostics: Option<Value>,
    config: Value,
    can_backup: bool,
    logged_in: bool,
@@ -120,15 +208,59 @@ struct AdminTemplateData {
}

impl AdminTemplateData {
    fn new(users: Vec<Value>) -> Self {
    fn new() -> Self {
        Self {
            page_content: String::from("admin/page"),
            page_content: String::from("admin/settings"),
            version: VERSION,
            users,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            organizations: None,
            diagnostics: None,
        }
    }

    fn users(users: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/users"),
            version: VERSION,
            users: Some(users),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            organizations: None,
            diagnostics: None,
        }
    }

    fn organizations(organizations: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/organizations"),
            version: VERSION,
            organizations: Some(organizations),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            diagnostics: None,
        }
    }

    fn diagnostics(diagnostics: Value) -> Self {
        Self {
            page_content: String::from("admin/diagnostics"),
            version: VERSION,
            organizations: None,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            diagnostics: Some(diagnostics),
        }
    }

@@ -138,11 +270,8 @@ impl AdminTemplateData {
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    let text = AdminTemplateData::new(users_json).render()?;
fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
    let text = AdminTemplateData::new().render()?;
    Ok(Html(text))
}

@@ -174,69 +303,287 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Empt
#[post("/test/smtp", data = "<data>")]
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
    let data: InviteData = data.into_inner();
    let email = data.email.clone();

    if CONFIG.mail_enabled() {
        mail::send_test(&email)
        mail::send_test(&data.email)
    } else {
        err!("Mail is not enabled")
    }
}

#[get("/logout")]
fn logout(mut cookies: Cookies) -> Result<Redirect, ()> {
fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
    cookies.remove(Cookie::named(COOKIE_NAME));
    Ok(Redirect::to(admin_path()))
    Redirect::to(admin_url(referer))
}

#[get("/users")]
fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    Ok(Json(Value::Array(users_json)))
    Json(Value::Array(users_json))
}

#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
    let users_json: Vec<Value> = users.iter()
        .map(|u| {
            let mut usr = u.to_json(&conn);
            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
            usr["user_enabled"] = json!(u.enabled);
            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
            usr["last_active"] = match u.last_active(&conn) {
                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
                None => json!("Never")
            };
            usr
        })
        .collect();

    let text = AdminTemplateData::users(users_json).render()?;
    Ok(Html(text))
}

#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    user.delete(&conn)
}

#[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    Device::delete_all_by_user(&user.uuid, &conn)?;
    user.reset_security_stamp();

    user.save(&conn)
}

#[post("/users/<uuid>/disable")]
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    Device::delete_all_by_user(&user.uuid, &conn)?;
    user.reset_security_stamp();
    user.enabled = false;

    user.save(&conn)
}

#[post("/users/<uuid>/enable")]
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    user.enabled = true;

    user.save(&conn)
}

#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
    user.totp_recover = None;
    user.save(&conn)
}

#[derive(Deserialize, Debug)]
struct UserOrgTypeData {
    user_type: NumberOrString,
    user_uuid: String,
    org_uuid: String,
}

#[post("/users/org_type", data = "<data>")]
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let data: UserOrgTypeData = data.into_inner();

    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
        Some(user) => user,
        None => err!("The specified user isn't member of the organization"),
    };

    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
        Some(new_type) => new_type as i32,
        None => err!("Invalid type"),
    };

    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
        // Removing owner permission; check that there is at least one other owner
        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();

        if num_owners <= 1 {
            err!("Can't change the type of the last owner")
        }
    }

    user_to_edit.atype = new_type as i32;
    user_to_edit.save(&conn)
}

#[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&conn)
}

#[get("/organizations/overview")]
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let organizations = Organization::get_all(&conn);
    let organizations_json: Vec<Value> = organizations.iter()
        .map(|o| {
            let mut org = o.to_json();
            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
            org
        })
        .collect();

    let text = AdminTemplateData::organizations(organizations_json).render()?;
    Ok(Html(text))
}

#[post("/organizations/<uuid>/delete")]
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
    org.delete(&conn)
}

#[derive(Deserialize)]
struct WebVaultVersion {
    version: String,
}

#[derive(Deserialize)]
struct GitRelease {
    tag_name: String,
}

#[derive(Deserialize)]
struct GitCommit {
    sha: String,
}

fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    let github_api = Client::builder().build()?;

    Ok(github_api
        .get(url)
        .timeout(Duration::from_secs(10))
        .header(USER_AGENT, "Bitwarden_RS")
        .send()?
        .error_for_status()?
        .json::<T>()?)
}

fn has_http_access() -> bool {
    let http_access = Client::builder().build().unwrap();

    match http_access
        .head("https://github.com/dani-garcia/bitwarden_rs")
        .timeout(Duration::from_secs(10))
        .header(USER_AGENT, "Bitwarden_RS")
        .send()
    {
        Ok(r) => r.status().is_success(),
        _ => false,
    }
}

#[get("/diagnostics")]
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
    use crate::util::read_file_string;
    use chrono::prelude::*;
    use std::net::ToSocketAddrs;

    // Get current running versions
    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
    let vault_version_str = read_file_string(&vault_version_path)?;
    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;

    // Execute some environment checks
    let running_within_docker = is_running_in_docker();
    let has_http_access = has_http_access();
    let uses_proxy = env::var_os("HTTP_PROXY").is_some()
        || env::var_os("http_proxy").is_some()
        || env::var_os("HTTPS_PROXY").is_some()
        || env::var_os("https_proxy").is_some();

    // Check if we are able to resolve DNS entries
    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
        Ok(Some(a)) => a.ip().to_string(),
        _ => "Could not resolve domain name.".to_string(),
    };

    // If the HTTP check failed, do not even attempt to check for new versions, since we were not able to connect to github.com anyway.
    // TODO: Maybe we need to cache this using a LazyStatic or something. GitHub only allows 60 requests per hour, and we use 3 here already.
    let (latest_release, latest_commit, latest_web_build) = if has_http_access {
        (
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
                Ok(r) => r.tag_name,
                _ => "-".to_string(),
            },
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
                Ok(mut c) => {
                    c.sha.truncate(8);
                    c.sha
                }
                _ => "-".to_string(),
            },
            // Do not fetch the web-vault version when running within Docker.
            // The web-vault version is embedded within the container itself, and should not be updated manually
            if running_within_docker {
                "-".to_string()
            } else {
                match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
                    _ => "-".to_string(),
                }
            },
        )
    } else {
        ("-".to_string(), "-".to_string(), "-".to_string())
    };

    let ip_header_name = match &ip_header.0 {
        Some(h) => h,
        _ => ""
    };

    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "web_vault_version": web_vault_version.version,
        "latest_release": latest_release,
        "latest_commit": latest_commit,
        "latest_web_build": latest_web_build,
        "running_within_docker": running_within_docker,
        "has_http_access": has_http_access,
        "ip_header_exists": &ip_header.0.is_some(),
        "ip_header_match": ip_header_name == CONFIG.ip_header(),
        "ip_header_name": ip_header_name,
        "ip_header_config": &CONFIG.ip_header(),
        "uses_proxy": uses_proxy,
        "db_type": *DB_TYPE,
        "db_version": get_sql_server_version(&conn),
        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
    });

    let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
    Ok(Html(text))
}

#[get("/diagnostics/config")]
fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
    let support_json = CONFIG.get_support_json();
    Json(support_json)
}

#[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
    let data: ConfigBuilder = data.into_inner();
src/api/core/accounts.rs
@@ -1,19 +1,16 @@
use chrono::Utc;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::db::models::*;
use crate::db::DbConn;
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
};

use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
use crate::auth::{decode_delete, decode_invite, decode_verify_email, Headers};
use crate::crypto;
use crate::mail;

use crate::CONFIG;

use rocket::Route;

pub fn routes() -> Vec<Route> {
pub fn routes() -> Vec<rocket::Route> {
    routes![
        register,
        profile,
@@ -36,6 +33,7 @@ pub fn routes() -> Vec<Route> {
        revision_date,
        password_hint,
        prelogin,
        verify_password,
    ]
}

@@ -68,7 +66,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_mail(&data.Email, &conn) {
        Some(user) => {
            if !user.password_hash.is_empty() {
                if CONFIG.signups_allowed() {
                if CONFIG.is_signup_allowed(&data.Email) {
                    err!("User already exists")
                } else {
                    err!("Registration not allowed or user already exists")
@@ -89,14 +87,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
                }

                user
            } else if CONFIG.signups_allowed() {
            } else if CONFIG.is_signup_allowed(&data.Email) {
                err!("Account with this email already exists")
            } else {
                err!("Registration not allowed or user already exists")
            }
        }
        None => {
            if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) || CONFIG.can_signup_user(&data.Email) {
            // Order is important here; the invitation check must come first
            // because the bitwarden_rs admin can invite anyone, regardless
            // of other signup restrictions.
            if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
                User::new(data.Email.clone())
            } else {
                err!("Registration not allowed or user already exists")
@@ -115,7 +116,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
        user.client_kdf_type = client_kdf_type;
    }

    user.set_password(&data.MasterPasswordHash);
    user.set_password(&data.MasterPasswordHash, None);
    user.akey = data.Key;

    // Add extra fields if present
@@ -139,19 +140,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
            }

            user.last_verifying_at = Some(user.created_at);
        } else {
            if let Err(e) = mail::send_welcome(&user.email) {
        } else if let Err(e) = mail::send_welcome(&user.email) {
            error!("Error sending welcome email: {:#?}", e);
        }
    }
    }

    user.save(&conn)
}

#[get("/accounts/profile")]
fn profile(headers: Headers, conn: DbConn) -> JsonResult {
    Ok(Json(headers.user.to_json(&conn)))
fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
    Json(headers.user.to_json(&conn))
}

#[derive(Deserialize, Debug)]
@@ -207,7 +206,12 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
    user.public_key = Some(data.PublicKey);

    user.save(&conn)?;
    Ok(Json(user.to_json(&conn)))

    Ok(Json(json!({
        "PrivateKey": user.private_key,
        "PublicKey": user.public_key,
||||
"Object":"keys"
|
||||
})))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -227,7 +231,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
user.set_password(&data.NewMasterPasswordHash);
|
||||
user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
|
||||
user.akey = data.Key;
|
||||
user.save(&conn)
|
||||
}
|
||||
@@ -254,7 +258,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
|
||||
|
||||
user.client_kdf_iter = data.KdfIterations;
|
||||
user.client_kdf_type = data.Kdf;
|
||||
user.set_password(&data.NewMasterPasswordHash);
|
||||
user.set_password(&data.NewMasterPasswordHash, None);
|
||||
user.akey = data.Key;
|
||||
user.save(&conn)
|
||||
}
|
||||
@@ -333,6 +337,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
||||
user.akey = data.Key;
|
||||
user.private_key = Some(data.PrivateKey);
|
||||
user.reset_security_stamp();
|
||||
user.reset_stamp_exception();
|
||||
|
||||
user.save(&conn)
|
||||
}
|
||||
@@ -371,8 +376,8 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
|
||||
err!("Email already in use");
|
||||
}
|
||||
|
||||
if !CONFIG.signups_allowed() && !CONFIG.can_signup_user(&data.NewEmail) {
|
||||
err!("Email cannot be changed to this address");
|
||||
if !CONFIG.is_email_domain_allowed(&data.NewEmail) {
|
||||
err!("Email domain not allowed");
|
||||
}
|
||||
|
||||
let token = crypto::generate_token(6)?;
|
||||
@@ -440,7 +445,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
|
||||
user.email_new = None;
|
||||
user.email_new_token = None;
|
||||
|
||||
user.set_password(&data.NewMasterPasswordHash);
|
||||
user.set_password(&data.NewMasterPasswordHash, None);
|
||||
user.akey = data.Key;
|
||||
|
||||
user.save(&conn)
|
||||
@@ -455,7 +460,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
|
||||
}
|
||||
|
||||
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
|
||||
error!("Error sending delete account email: {:#?}", e);
|
||||
error!("Error sending verify_email email: {:#?}", e);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -606,7 +611,7 @@ struct PreloginData {
|
||||
}
|
||||
|
||||
#[post("/accounts/prelogin", data = "<data>")]
|
||||
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
|
||||
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
|
||||
let data: PreloginData = data.into_inner().data;
|
||||
|
||||
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
|
||||
@@ -614,8 +619,25 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
|
||||
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
|
||||
};
|
||||
|
||||
Ok(Json(json!({
|
||||
Json(json!({
|
||||
"Kdf": kdf_type,
|
||||
"KdfIterations": kdf_iter
|
||||
})))
|
||||
}))
|
||||
}
|
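// A sketch, not from this diff: what a client does with the prelogin response.
// Bitwarden clients derive the master key via PBKDF2-HMAC-SHA256 over the
// password, salted with the normalized account email, using the returned
// KdfIterations. Assumes the `ring` crate; the function name is illustrative.
fn derive_master_key(password: &str, email: &str, kdf_iterations: u32) -> [u8; 32] {
    use std::num::NonZeroU32;
    let mut master_key = [0u8; 32];
    ring::pbkdf2::derive(
        ring::pbkdf2::PBKDF2_HMAC_SHA256,
        NonZeroU32::new(kdf_iterations).unwrap(),
        email.trim().to_lowercase().as_bytes(), // the email acts as the salt
        password.as_bytes(),
        &mut master_key,
    );
    master_key
}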
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct VerifyPasswordData {
    MasterPasswordHash: String,
}

#[post("/accounts/verify-password", data = "<data>")]
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
    let data: VerifyPasswordData = data.into_inner().data;
    let user = headers.user;

    if !user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password")
    }

    Ok(())
}

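// A hedged sketch, not from this diff, of what a check like
// `check_valid_password` typically amounts to on the server: the stored hash
// is a server-side PBKDF2 of the client-supplied master password hash, and
// verification re-derives and compares it. Assumes the `ring` crate; names
// are illustrative.
fn verify_password_hash(candidate: &[u8], salt: &[u8], stored: &[u8], iterations: u32) -> bool {
    use std::num::NonZeroU32;
    ring::pbkdf2::verify(
        ring::pbkdf2::PBKDF2_HMAC_SHA256,
        NonZeroU32::new(iterations).unwrap(),
        salt,
        candidate,
        stored,
    )
    .is_ok() // the comparison inside `verify` is constant-time
}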
@@ -1,28 +1,33 @@
use std::collections::{HashMap, HashSet};
use std::path::Path;

use rocket::http::ContentType;
use rocket::{request::Form, Data, Route};

use chrono::{NaiveDateTime, Utc};
use rocket::{http::ContentType, request::Form, Data, Route};
use rocket_contrib::json::Json;
use serde_json::Value;

use multipart::server::save::SavedData;
use multipart::server::{Multipart, SaveResult};

use data_encoding::HEXLOWER;
use multipart::server::{save::SavedData, Multipart, SaveResult};

use crate::db::models::*;
use crate::db::DbConn;

use crate::crypto;

use crate::api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType};
use crate::auth::Headers;

use crate::CONFIG;
use crate::{
    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
    auth::Headers,
    crypto,
    db::{models::*, DbConn},
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    // Note that many routes have an `admin` variant; this seems to be
    // because the stored procedure that upstream Bitwarden uses to determine
    // whether the user can edit a cipher doesn't take into account whether
    // the user is an org owner/admin. The `admin` variant first checks
    // whether the user is an owner/admin of the relevant org, and if so,
    // allows the operation unconditionally.
    //
    // bitwarden_rs factors in the org owner/admin status as part of
    // determining the write accessibility of a cipher, so most
    // admin/non-admin implementations can be shared.
    routes![
        sync,
        get_ciphers,
@@ -44,15 +49,24 @@ pub fn routes() -> Vec<Route> {
        post_cipher_admin,
        post_cipher_share,
        put_cipher_share,
        put_cipher_share_seleted,
        put_cipher_share_selected,
        post_cipher,
        put_cipher,
        delete_cipher_post,
        delete_cipher_post_admin,
        delete_cipher_put,
        delete_cipher_put_admin,
        delete_cipher,
        delete_cipher_admin,
        delete_cipher_selected,
        delete_cipher_selected_post,
        delete_cipher_selected_put,
        delete_cipher_selected_admin,
        delete_cipher_selected_post_admin,
        delete_cipher_selected_put_admin,
        restore_cipher_put,
        restore_cipher_put_admin,
        restore_cipher_selected,
        delete_all,
        move_cipher_selected,
        move_cipher_selected_put,
@@ -70,55 +84,64 @@ struct SyncData {
}

#[get("/sync?<data..>")]
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
    let user_json = headers.user.to_json(&conn);

    let folders = Folder::find_by_user(&headers.user.uuid, &conn);
    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
    let collections_json: Vec<Value> = collections.iter()
        .map(|c| c.to_json_details(&headers.user.uuid, &conn))
        .collect();

    let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();

    let sends = Send::find_by_user(&headers.user.uuid, &conn);
    let sends_json: Vec<Value> = sends
        .iter()
        .map(|s| s.to_json())
        .collect();

    let domains_json = if data.exclude_domains {
        Value::Null
    } else {
        api::core::_get_eq_domains(headers, true).unwrap().into_inner()
        api::core::_get_eq_domains(headers, true).into_inner()
    };

    Ok(Json(json!({
    Json(json!({
        "Profile": user_json,
        "Folders": folders_json,
        "Collections": collections_json,
        "Policies": policies_json,
        "Ciphers": ciphers_json,
        "Domains": domains_json,
        "Sends": sends_json,
        "Object": "sync"
    })))
    }))
}

#[get("/ciphers")]
fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);

    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();

    Ok(Json(json!({
    Json(json!({
        "Data": ciphers_json,
        "Object": "list",
        "ContinuationToken": null
    })))
    }))
}

#[get("/ciphers/<uuid>")]
@@ -181,6 +204,14 @@ pub struct CipherData {
    #[serde(rename = "Attachments")]
    _Attachments: Option<Value>, // Unused, contains map of {id: filename}
    Attachments2: Option<HashMap<String, Attachments2Data>>,

    // The revision datetime (in ISO 8601 format) of the client's local copy
    // of the cipher. This is used to prevent a client from updating a cipher
    // when it doesn't have the latest version, as that can result in data
    // loss. It's not an error when no value is provided; this can happen
    // when using older client versions, or if the operation doesn't involve
    // updating an existing cipher.
    LastKnownRevisionDate: Option<String>,
}

#[derive(Deserialize, Debug)]
@@ -190,22 +221,46 @@ pub struct Attachments2Data {
    Key: String,
}

/// Called when an org admin clones an org cipher.
#[post("/ciphers/admin", data = "<data>")]
fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let data: ShareCipherData = data.into_inner().data;
    post_ciphers_create(data, headers, conn, nt)
}

/// Called when creating a new org-owned cipher, or cloning a cipher (whether
/// user- or org-owned). When cloning a cipher to a user-owned cipher,
/// `organizationId` is null.
#[post("/ciphers/create", data = "<data>")]
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let mut data: ShareCipherData = data.into_inner().data;

    // Check that one or more collections are selected when this cipher is part of an organization;
    // err before creating an empty cipher if that is not the case.
    if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
        err!("You must select at least one collection.");
    }

    // This check is usually only needed in update_cipher_from_data(), but we
    // need it here as well to avoid creating an empty cipher in the call to
    // cipher.save() below.
    enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;

    let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
    cipher.user_uuid = Some(headers.user.uuid.clone());
    cipher.save(&conn)?;

    // When cloning a cipher, the Bitwarden clients seem to set this field
    // based on the cipher being cloned (when creating a new cipher, it's set
    // to null as expected). However, `cipher.created_at` is initialized to
    // the current time, so the stale data check will end up failing down the
    // line. Since this function only creates new ciphers (whether by cloning
    // or otherwise), we can just ignore this field entirely.
    data.Cipher.LastKnownRevisionDate = None;

    share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
}

#[post("/ciphers/create", data = "<data>")]
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    post_ciphers_admin(data, headers, conn, nt)
}

/// Called when creating a new user-owned cipher.
#[post("/ciphers", data = "<data>")]
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let data: CipherData = data.into_inner().data;
@@ -216,6 +271,29 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}

/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
/// A non-owner/admin user belonging to an org with the personal ownership policy
/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
/// (that were created before the policy was applicable to the user). The user is
/// allowed to delete or share such ciphers to an org, however.
///
/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
fn enforce_personal_ownership_policy(
    data: &CipherData,
    headers: &Headers,
    conn: &DbConn
) -> EmptyResult {
    if data.OrganizationId.is_none() {
        let user_uuid = &headers.user.uuid;
        let policy_type = OrgPolicyType::PersonalOwnership;
        if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
            err!("Due to an Enterprise Policy, you are restricted from \
                saving items to your personal vault.")
        }
    }
    Ok(())
}

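// A sketch, not from this diff, of what a check like
// OrgPolicy::is_applicable_to_user plausibly amounts to: the policy applies
// when the user is a non-owner/admin member of any org that has the policy
// enabled. Field and method names mirror the models used in this diff, but
// this exact body is an assumption, not the real implementation.
fn is_applicable(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool {
    OrgPolicy::find_by_user(user_uuid, conn)
        .iter()
        .filter(|p| p.enabled && p.atype == policy_type as i32)
        .any(|p| {
            match UserOrganization::find_by_user_and_org(user_uuid, &p.org_uuid, conn) {
                // Owners/admins are exempt from the policy.
                Some(member) => member.atype < UserOrgType::Admin as i32,
                None => false,
            }
        })
}
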
pub fn update_cipher_from_data(
    cipher: &mut Cipher,
    data: CipherData,
@@ -225,6 +303,19 @@ pub fn update_cipher_from_data(
    nt: &Notify,
    ut: UpdateType,
) -> EmptyResult {
    enforce_personal_ownership_policy(&data, headers, conn)?;

    // Check that the client isn't updating an existing cipher with stale data.
    if let Some(dt) = data.LastKnownRevisionDate {
        match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
            Err(err) =>
                warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
                err!("The client copy of this cipher is out of date. Resync the client and try again."),
            Ok(_) => (),
        }
    }

    if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
        err!("Organization mismatch. Please resync the client before updating the cipher")
    }
@@ -238,6 +329,11 @@ pub fn update_cipher_from_data(
        || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
    {
        cipher.organization_uuid = Some(org_id);
        // After some discussion in PR #1329, the user_uuid = None was re-added.
        // TODO: Audit/check the whole save/update cipher chain.
        // Upstream uses the user_uuid so that a cipher a user added to an org still lets that user view/edit it,
        // even when the user has hide-passwords configured as their policy.
        // Removing the line below would fix that, but we have to check what effect this would have on the rest of the code.
        cipher.user_uuid = None;
    } else {
        err!("You don't have permission to add cipher directly to organization")
@@ -268,7 +364,10 @@ pub fn update_cipher_from_data(
    };

    if saved_att.cipher_uuid != cipher.uuid {
        err!("Attachment is not owned by the cipher")
        // Warn and break here, since cloning a cipher submits attachment data but the attachments themselves are not cloned.
        // If we errored out here, it would break the whole clone and cause empty ciphers to appear.
        warn!("Attachment is not owned by the cipher");
        break;
    }

    saved_att.akey = Some(attachment.Key);
@@ -278,6 +377,23 @@ pub fn update_cipher_from_data(
        }
    }

    // Clean up cipher data, like removing the 'Response' key.
    // This key is generated somewhere in the JavaScript client, so there is no way for us to fix it at the source.
    // Also, upstream only retrieves the keys they actually want to store, and thus skips the 'Response' key.
    // We do not mind which data is in it, to keep our model more flexible when there are upstream changes.
    // But we at least know we do not need to store and return this specific key.
    fn _clean_cipher_data(mut json_data: Value) -> Value {
        if json_data.is_array() {
            json_data.as_array_mut()
                .unwrap()
                .iter_mut()
                .for_each(|ref mut f| {
                    f.as_object_mut().unwrap().remove("Response");
                });
        };
        json_data
    }

    let type_data_opt = match data.Type {
        1 => data.Login,
        2 => data.SecureNote,
@@ -286,29 +402,28 @@ pub fn update_cipher_from_data(
        _ => err!("Invalid type"),
    };

    let mut type_data = match type_data_opt {
        Some(data) => data,
    let type_data = match type_data_opt {
        Some(mut data) => {
            // Remove the 'Response' key from the base object.
            data.as_object_mut().unwrap().remove("Response");
            // Remove the 'Response' key from every Uri.
            if data["Uris"].is_array() {
                data["Uris"] = _clean_cipher_data(data["Uris"].clone());
            }
            data
        },
        None => err!("Data missing"),
    };

    // TODO: ******* Backwards compat start **********
    // To remove backwards compatibility, just delete this code,
    // and remove the compat code from cipher::to_json
    type_data["Name"] = Value::String(data.Name.clone());
    type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
    type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
    // TODO: ******* Backwards compat end **********

    cipher.favorite = data.Favorite.unwrap_or(false);
    cipher.name = data.Name;
    cipher.notes = data.Notes;
    cipher.fields = data.Fields.map(|f| f.to_string());
    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
    cipher.data = type_data.to_string();
    cipher.password_history = data.PasswordHistory.map(|f| f.to_string());

    cipher.save(&conn)?;
    cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
    cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;

    if ut != UpdateType::None {
        nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
@@ -371,6 +486,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
    Ok(())
}

/// Called when an org admin modifies an existing org cipher.
#[put("/ciphers/<uuid>/admin", data = "<data>")]
fn put_cipher_admin(
    uuid: String,
@@ -407,6 +523,11 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
        None => err!("Cipher doesn't exist"),
    };

    // TODO: Check if only the folder ID or favorite status is being changed.
    // These are per-user properties that technically aren't part of the
    // cipher itself, so the user shouldn't need write access to change these.
    // Interestingly, upstream Bitwarden doesn't properly handle this either.

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
        err!("Cipher is not write accessible")
    }
@@ -540,7 +661,7 @@ struct ShareSelectedCipherData {
}

#[put("/ciphers/share", data = "<data>")]
fn put_cipher_share_seleted(
fn put_cipher_share_selected(
    data: JsonUpcase<ShareSelectedCipherData>,
    headers: Headers,
    conn: DbConn,
@@ -608,9 +729,8 @@ fn share_cipher_by_uuid(
    match data.Cipher.OrganizationId.clone() {
        // If we don't get an organization ID, we don't do anything
        // No error because this is used when using the Clone functionality
        None => {},
        None => {}
        Some(organization_uuid) => {

            for uuid in &data.CollectionIds {
                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                    None => err!("Invalid collection ID provided"),
@@ -761,11 +881,7 @@ fn post_attachment_admin(
    post_attachment(uuid, data, content_type, headers, conn, nt)
}

#[post(
    "/ciphers/<uuid>/attachment/<attachment_id>/share",
    format = "multipart/form-data",
    data = "<data>"
)]
#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
fn post_attachment_share(
    uuid: String,
    attachment_id: String,
@@ -819,50 +935,79 @@ fn delete_attachment_admin(

#[post("/ciphers/<uuid>/delete")]
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[post("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[put("/ciphers/<uuid>/delete")]
fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
}

#[put("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
}

#[delete("/ciphers/<uuid>")]
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[delete("/ciphers/<uuid>/admin")]
fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[delete("/ciphers", data = "<data>")]
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    for uuid in uuids {
        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &nt) {
            return error;
        };
    }

    Ok(())
    _delete_multiple_ciphers(data, headers, conn, false, nt)
}

#[post("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_multiple_ciphers(data, headers, conn, false, nt)
}

#[put("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
}

#[delete("/ciphers/admin", data = "<data>")]
fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_cipher_selected(data, headers, conn, nt)
}

#[post("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_cipher_selected_post(data, headers, conn, nt)
}

#[put("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_cipher_selected_put(data, headers, conn, nt)
}

#[put("/ciphers/<uuid>/restore")]
fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}

#[put("/ciphers/<uuid>/restore-admin")]
fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}

#[put("/ciphers/restore", data = "<data>")]
fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    _restore_multiple_ciphers(data, &headers, &conn, &nt)
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct MoveCipherData {
@@ -946,7 +1091,6 @@ fn delete_all(
        Some(user_org) => {
            if user_org.atype == UserOrgType::Owner {
                Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
                Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                nt.send_user_update(UpdateType::Vault, &user);
                Ok(())
            } else {
@@ -974,8 +1118,8 @@ fn delete_all(
            }
        }
    }

fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };
@@ -984,11 +1128,81 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Not
        err!("Cipher can't be deleted by user")
    }

    if soft_delete {
        cipher.deleted_at = Some(Utc::now().naive_utc());
        cipher.save(&conn)?;
        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    } else {
        cipher.delete(&conn)?;
        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
    }

    Ok(())
}

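// A sketch, not from this diff: with soft deletes in place, "visible" lookups
// such as Cipher::find_by_user_visible presumably exclude rows whose
// deleted_at is set. The real code most likely filters in SQL; this in-memory
// equivalent just illustrates the semantics against the Cipher model above.
fn visible_only(ciphers: Vec<Cipher>) -> Vec<Cipher> {
    ciphers
        .into_iter()
        .filter(|c| c.deleted_at.is_none()) // soft-deleted ciphers stay in the trash
        .collect()
}
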
fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    for uuid in uuids {
        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt) {
            return error;
        };
    }

    Ok(())
}

fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
        err!("Cipher can't be restored by user")
    }

    cipher.deleted_at = None;
    cipher.save(&conn)?;

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}

fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    let mut ciphers: Vec<Value> = Vec::new();
    for uuid in uuids {
        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
            Ok(json) => ciphers.push(json.into_inner()),
            err => return err,
        }
    }

    Ok(Json(json!({
        "Data": ciphers,
        "Object": "list",
        "ContinuationToken": null
    })))
}

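// The "Ids" parsing above appears verbatim in both _delete_multiple_ciphers
// and _restore_multiple_ciphers; a possible shared helper (a sketch, not in
// the diff) could look like this:
fn parse_ids(data: &Value) -> Result<Vec<&str>, &'static str> {
    match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => Ok(ids.iter().filter_map(Value::as_str).collect()),
            None => Err("Posted ids field is not an array"),
        },
        None => Err("Request missing ids field"),
    }
}
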
fn _delete_cipher_attachment_by_id(
    uuid: &str,
    attachment_id: &str,

@@ -1,15 +1,13 @@
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::db::models::*;
use crate::db::DbConn;
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::Headers,
    db::{models::*, DbConn},
};

use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType};
use crate::auth::Headers;

use rocket::Route;

pub fn routes() -> Vec<Route> {
pub fn routes() -> Vec<rocket::Route> {
    routes![
        get_folders,
        get_folder,
@@ -22,16 +20,16 @@ pub fn routes() -> Vec<Route> {
}

#[get("/folders")]
fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
    let folders = Folder::find_by_user(&headers.user.uuid, &conn);

    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": folders_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[get("/folders/<uuid>")]
@@ -50,7 +48,6 @@ fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {

#[derive(Deserialize)]
#[allow(non_snake_case)]

pub struct FolderData {
    pub Name: String,
}

@@ -2,7 +2,10 @@ mod accounts;
mod ciphers;
mod folders;
mod organizations;
pub(crate) mod two_factor;
pub mod two_factor;
mod sends;

pub use sends::start_send_deletion_scheduler;

pub fn routes() -> Vec<Route> {
    let mut mod_routes = routes![
@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
    routes.append(&mut folders::routes());
    routes.append(&mut organizations::routes());
    routes.append(&mut two_factor::routes());
    routes.append(&mut sends::routes());
    routes.append(&mut mod_routes);

    routes
@@ -29,17 +33,19 @@ pub fn routes() -> Vec<Route> {
// Move this somewhere else
//
use rocket::Route;

use rocket_contrib::json::Json;
use rocket::response::Response;
use serde_json::Value;

use crate::api::{EmptyResult, JsonResult, JsonUpcase};
use crate::auth::Headers;
use crate::db::DbConn;
use crate::error::Error;
use crate::{
    api::{JsonResult, JsonUpcase},
    auth::Headers,
    db::DbConn,
    error::Error,
};

#[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token(uuid: String) -> EmptyResult {
fn clear_device_token<'a>(uuid: String) -> Response<'a> {
    // This endpoint doesn't have auth header

    let _ = uuid;
@@ -48,11 +54,11 @@ fn clear_device_token(uuid: String) -> EmptyResult {
    // This only clears push token
    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
    Ok(())
    Response::new()
}

#[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> JsonResult {
fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
    let _data: Value = data.into_inner().data;
    // Data has a single string value "PushToken"
    let _ = uuid;
@@ -60,13 +66,13 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->

    // TODO: This should save the push token, but we don't have push functionality

    Ok(Json(json!({
    Json(json!({
        "Id": headers.device.uuid,
        "Name": headers.device.name,
        "Type": headers.device.atype,
        "Identifier": headers.device.uuid,
        "CreationDate": crate::util::format_date(&headers.device.created_at),
    })))
    }))
}

#[derive(Serialize, Deserialize, Debug)]
@@ -80,11 +86,11 @@ struct GlobalDomain {
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");

#[get("/settings/domains")]
fn get_eq_domains(headers: Headers) -> JsonResult {
fn get_eq_domains(headers: Headers) -> Json<Value> {
    _get_eq_domains(headers, false)
}

fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
    let user = headers.user;
    use serde_json::from_str;

@@ -101,11 +107,11 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
        globals.retain(|g| !g.Excluded);
    }

    Ok(Json(json!({
    Json(json!({
        "EquivalentDomains": equivalent_domains,
        "GlobalEquivalentDomains": globals,
        "Object": "domains",
    })))
    }))
}

#[derive(Deserialize, Debug)]
@@ -146,7 +152,7 @@ fn hibp_breach(username: String) -> JsonResult {
        username
    );

    use reqwest::{header::USER_AGENT, blocking::Client};
    use reqwest::{blocking::Client, header::USER_AGENT};

    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
        let hibp_client = Client::builder().build()?;
@@ -171,7 +177,7 @@ fn hibp_breach(username: String) -> JsonResult {
        "Domain": "haveibeenpwned.com",
        "BreachDate": "2019-08-18T00:00:00Z",
        "AddedDate": "2019-08-18T00:00:00Z",
        "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
        "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
        "LogoPath": "bwrs_static/hibp.png",
        "PwnCount": 0,
        "DataClasses": [

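// A sketch, not from this diff, of how the keyed HIBP lookup above is
// typically issued with the blocking reqwest client. The `hibp-api-key`
// header is HIBP's documented v3 auth mechanism; the function name and exact
// error handling here are illustrative.
fn query_hibp(url: &str, user_agent: &str, api_key: &str) -> Result<String, reqwest::Error> {
    use reqwest::{blocking::Client, header::USER_AGENT};
    Client::builder()
        .build()?
        .get(url)
        .header(USER_AGENT, user_agent)     // HIBP rejects requests without a UA
        .header("hibp-api-key", api_key)    // v3 API authentication
        .send()?
        .text()
}
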
@@ -1,17 +1,14 @@
use rocket::request::Form;
use rocket::Route;
use num_traits::FromPrimitive;
use rocket::{request::Form, Route};
use rocket_contrib::json::Json;
use serde_json::Value;
use num_traits::FromPrimitive;

use crate::api::{
    EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
    db::{models::*, DbConn},
    mail, CONFIG,
};
use crate::auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders};
use crate::db::models::*;
use crate::db::DbConn;
use crate::mail;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![
@@ -50,6 +47,10 @@ pub fn routes() -> Vec<Route> {
        list_policies_token,
        get_policy,
        put_policy,
        get_organization_tax,
        get_plans,
        get_plans_tax_rates,
        import,
    ]
}

@@ -79,6 +80,10 @@ struct NewCollectionData {

#[post("/organizations", data = "<data>")]
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
    if !CONFIG.is_org_creation_allowed(&headers.user.email) {
        err!("User not allowed to create organizations")
    }

    let data: OrgData = data.into_inner().data;

    let org = Organization::new(data.Name, data.BillingEmail);
@@ -187,8 +192,8 @@ fn post_organization(

// GET /api/collections?writeOnly=false
#[get("/collections")]
fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
    Ok(Json(json!({
fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
    Json(json!({
        "Data":
            Collection::find_by_user_uuid(&headers.user.uuid, &conn)
                .iter()
@@ -196,12 +201,12 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
                .collect::<Value>(),
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[get("/organizations/<org_id>/collections")]
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    Ok(Json(json!({
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
    Json(json!({
        "Data":
            Collection::find_by_organization(&org_id, &conn)
                .iter()
@@ -209,13 +214,13 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
                .collect::<Value>(),
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[post("/organizations/<org_id>/collections", data = "<data>")]
fn post_organization_collections(
    org_id: String,
    _headers: AdminHeaders,
    headers: ManagerHeadersLoose,
    data: JsonUpcase<NewCollectionData>,
    conn: DbConn,
) -> JsonResult {
@@ -226,9 +231,22 @@ fn post_organization_collections(
        None => err!("Can't find organization details"),
    };

    // Get the user_organization record so that we can check if the user has access to all collections.
    let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(u) => u,
        None => err!("User is not part of organization"),
    };

    let collection = Collection::new(org.uuid, data.Name);
    collection.save(&conn)?;

    // If the user doesn't have access to all collections (only the case for a Manager),
    // we need to save the creating user's uuid (the Manager) to the users_collection table.
    // Otherwise the user would not have access to the collection they just created.
    if !user_org.access_all {
        CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?;
    }

    Ok(Json(collection.to_json()))
}

@@ -236,7 +254,7 @@ fn post_organization_collections(
fn put_organization_collection_update(
    org_id: String,
    col_id: String,
    headers: AdminHeaders,
    headers: ManagerHeaders,
    data: JsonUpcase<NewCollectionData>,
    conn: DbConn,
) -> JsonResult {
@@ -247,7 +265,7 @@ fn put_organization_collection_update(
fn post_organization_collection_update(
    org_id: String,
    col_id: String,
    _headers: AdminHeaders,
    _headers: ManagerHeaders,
    data: JsonUpcase<NewCollectionData>,
    conn: DbConn,
) -> JsonResult {
@@ -315,7 +333,7 @@ fn post_organization_collection_delete_user(
}

#[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
    match Collection::find_by_uuid(&col_id, &conn) {
        None => err!("Collection not found"),
        Some(collection) => {
@@ -339,7 +357,7 @@ struct DeleteCollectionData {
fn post_organization_collection_delete(
    org_id: String,
    col_id: String,
    headers: AdminHeaders,
    headers: ManagerHeaders,
    _data: JsonUpcase<DeleteCollectionData>,
    conn: DbConn,
) -> EmptyResult {
@@ -347,7 +365,7 @@ fn post_organization_collection_delete(
}

#[get("/organizations/<org_id>/collections/<coll_id>/details")]
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult {
    match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
        None => err!("Collection not found"),
        Some(collection) => {
@@ -361,7 +379,7 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHead
}

#[get("/organizations/<org_id>/collections/<coll_id>/users")]
fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult {
    // Get org and collection, check that collection is from org
    let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
        None => err!("Collection not found in Organization"),
@@ -374,7 +392,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
        .map(|col_user| {
            UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
                .unwrap()
                .to_json_collection_user_details(col_user.read_only)
                .to_json_user_access_restrictions(&col_user)
        })
        .collect();

@@ -386,7 +404,7 @@ fn put_collection_users(
    org_id: String,
    coll_id: String,
    data: JsonUpcaseVec<CollectionData>,
    _headers: AdminHeaders,
    _headers: ManagerHeaders,
    conn: DbConn,
) -> EmptyResult {
    // Get org and collection, check that collection is from org
@@ -408,7 +426,9 @@ fn put_collection_users(
            continue;
        }

        CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, &conn)?;
        CollectionUser::save(&user.user_uuid, &coll_id,
            d.ReadOnly, d.HidePasswords,
            &conn)?;
    }

    Ok(())
@@ -421,30 +441,30 @@ struct OrgIdData {
}

#[get("/ciphers/organization-details?<data..>")]
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> JsonResult {
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Json<Value> {
    let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();

    Ok(Json(json!({
    Json(json!({
        "Data": ciphers_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[get("/organizations/<org_id>/users")]
fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json<Value> {
    let users = UserOrganization::find_by_org(&org_id, &conn);
    let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": users_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[derive(Deserialize)]
@@ -452,6 +472,7 @@ fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonRe
struct CollectionData {
    Id: String,
    ReadOnly: bool,
    HidePasswords: bool,
}

#[derive(Deserialize)]
@@ -485,7 +506,11 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
    let user = match User::find_by_mail(&email, &conn) {
        None => {
            if !CONFIG.invitations_allowed() {
                err!(format!("User email does not exist: {}", email))
                err!(format!("User does not exist: {}", email))
            }

            if !CONFIG.is_email_domain_allowed(&email) {
                err!("Email domain not eligible for invitations")
            }

            if !CONFIG.mail_enabled() {
@@ -519,7 +544,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
            match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                None => err!("Collection not found in Organization"),
                Some(collection) => {
                    CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn)?;
                    CollectionUser::save(&user.uuid, &collection.uuid,
                        col.ReadOnly, col.HidePasswords,
                        &conn)?;
                }
            }
        }
@@ -628,7 +655,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    }

    if CONFIG.mail_enabled() {
        let mut org_name = String::from("bitwarden_rs");
        let mut org_name = CONFIG.invitation_org_name();
        if let Some(org_id) = &claims.org_id {
            org_name = match Organization::find_by_uuid(&org_id, &conn) {
                Some(org) => org.name,
@@ -774,7 +801,9 @@ fn edit_user(
            match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                None => err!("Collection not found in Organization"),
                Some(collection) => {
                    CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn)?;
                    CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
                        col.ReadOnly, col.HidePasswords,
                        &conn)?;
                }
            }
        }
@@ -901,15 +930,15 @@ fn post_org_import(
}

#[get("/organizations/<org_id>/policies")]
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
    let policies = OrgPolicy::find_by_org(&org_id, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": policies_json,
        "Object": "list",
        "ContinuationToken": null
    })))
    }))
}

#[get("/organizations/<org_id>/policies/token?<token>")]
@@ -940,7 +969,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
        Some(pt) => pt,
        None => err!("Invalid policy type"),
        None => err!("Invalid or unsupported policy type"),
    };

    let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
@@ -979,3 +1008,169 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A

    Ok(Json(policy.to_json()))
}

||||
#[allow(unused_variables)]
|
||||
#[get("/organizations/<org_id>/tax")]
|
||||
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
|
||||
// Prevent a 404 error, which also causes Javascript errors.
|
||||
err!("Only allowed when not self hosted.")
|
||||
}
|
||||
|
||||
#[get("/plans")]
|
||||
fn get_plans(_headers: Headers, _conn: DbConn) -> Json<Value> {
|
||||
Json(json!({
|
||||
"Object": "list",
|
||||
"Data": [
|
||||
{
|
||||
"Object": "plan",
|
||||
"Type": 0,
|
||||
"Product": 0,
|
||||
"Name": "Free",
|
||||
"IsAnnual": false,
|
||||
"NameLocalizationKey": "planNameFree",
|
||||
"DescriptionLocalizationKey": "planDescFree",
|
||||
"CanBeUsedByBusiness": false,
|
||||
"BaseSeats": 2,
|
||||
"BaseStorageGb": null,
|
||||
"MaxCollections": 2,
|
||||
"MaxUsers": 2,
|
||||
"HasAdditionalSeatsOption": false,
|
||||
"MaxAdditionalSeats": null,
|
||||
"HasAdditionalStorageOption": false,
|
||||
"MaxAdditionalStorage": null,
|
||||
"HasPremiumAccessOption": false,
|
||||
"TrialPeriodDays": null,
|
||||
"HasSelfHost": false,
|
||||
"HasPolicies": false,
|
||||
"HasGroups": false,
|
||||
"HasDirectory": false,
|
||||
"HasEvents": false,
|
||||
"HasTotp": false,
|
||||
"Has2fa": false,
|
||||
"HasApi": false,
|
||||
"HasSso": false,
|
||||
"UsersGetPremium": false,
|
||||
"UpgradeSortOrder": -1,
|
||||
"DisplaySortOrder": -1,
|
||||
"LegacyYear": null,
|
||||
"Disabled": false,
|
||||
"StripePlanId": null,
|
||||
"StripeSeatPlanId": null,
|
||||
"StripeStoragePlanId": null,
|
||||
"StripePremiumAccessPlanId": null,
|
||||
"BasePrice": 0.0,
|
||||
"SeatPrice": 0.0,
|
||||
"AdditionalStoragePricePerGb": 0.0,
|
||||
"PremiumAccessOptionPrice": 0.0
|
||||
}
|
||||
],
|
||||
"ContinuationToken": null
|
||||
}))
|
||||
}
|
||||
|
||||
#[get("/plans/sales-tax-rates")]
|
||||
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> Json<Value> {
|
||||
// Prevent a 404 error, which also causes Javascript errors.
|
||||
Json(json!({
|
||||
"Object": "list",
|
||||
"Data": [],
|
||||
"ContinuationToken": null
|
||||
}))
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportGroupData {
    Name: String,       // "GroupName"
    ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
    Users: Vec<String>, // ["uid=user,ou=People,dc=example,dc=com"]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportUserData {
    Email: String,      // "user@maildomain.net"
    ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
    Deleted: bool,
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportData {
    Groups: Vec<OrgImportGroupData>,
    OverwriteExisting: bool,
    Users: Vec<OrgImportUserData>,
}

#[post("/organizations/<org_id>/import", data = "<data>")]
fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
    let data = data.into_inner().data;

    // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way
    // to differentiate between auto-imported users and manually added ones.
    // This means that this endpoint can end up removing users that were added manually by an admin,
    // as opposed to upstream, which only removes auto-imported users.

    // The user needs to be an admin or owner to use the Directory Connector
    match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
        Some(_) => err!("User has insufficient permissions to use Directory Connector"),
        None => err!("User not part of organization"),
    };

    for user_data in &data.Users {
        if user_data.Deleted {
            // If the user is marked for deletion and exists, delete it
            if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) {
                user_org.delete(&conn)?;
            }

        // If the user is not part of the organization, but does exist
        } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
            if let Some(user) = User::find_by_mail(&user_data.Email, &conn) {
                let user_org_status = if CONFIG.mail_enabled() {
                    UserOrgStatus::Invited as i32
                } else {
                    UserOrgStatus::Accepted as i32 // Automatically mark the user as accepted if no email invites are sent
                };

                let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
                new_org_user.access_all = false;
                new_org_user.atype = UserOrgType::User as i32;
                new_org_user.status = user_org_status;

                new_org_user.save(&conn)?;

                if CONFIG.mail_enabled() {
                    let org_name = match Organization::find_by_uuid(&org_id, &conn) {
                        Some(org) => org.name,
                        None => err!("Error looking up organization"),
                    };

                    mail::send_invite(
                        &user_data.Email,
                        &user.uuid,
                        Some(org_id.clone()),
                        Some(new_org_user.uuid),
                        &org_name,
                        Some(headers.user.email.clone()),
                    )?;
                }
            }
        }
    }

    // If this flag is enabled, any user that isn't provided in the Users list will be removed
    // (by default they are kept unless they have Deleted == true)
    if data.OverwriteExisting {
        for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
            if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
                if !data.Users.iter().any(|u| u.Email == user_email) {
                    user_org.delete(&conn)?;
                }
            }
        }
    }

    Ok(())
}
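
For reference, a hypothetical Directory Connector payload that deserializes into OrgImportData might look like this (the values are illustrative, taken from the comments on the structs above; the upper-case field names match the JsonUpcase wrapper):

{
    "Groups": [{
        "Name": "GroupName",
        "ExternalId": "cn=GroupName,ou=Groups,dc=example,dc=com",
        "Users": ["uid=user,ou=People,dc=example,dc=com"]
    }],
    "Users": [{
        "Email": "user@maildomain.net",
        "ExternalId": "uid=user,ou=People,dc=example,dc=com",
        "Deleted": false
    }],
    "OverwriteExisting": false
}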

416 src/api/core/sends.rs Normal file
@@ -0,0 +1,416 @@
use std::{io::Read, path::Path};

use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, Data};
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{Headers, Host},
    db::{models::*, DbConn},
    CONFIG,
};

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

pub fn routes() -> Vec<rocket::Route> {
    routes![
        post_send,
        post_send_file,
        post_access,
        post_access_file,
        put_send,
        delete_send,
        put_remove_password
    ]
}

pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
    std::thread::spawn(move || {
        loop {
            if let Ok(conn) = pool.get() {
                info!("Initiating send deletion");
                for send in Send::find_all(&conn) {
                    if chrono::Utc::now().naive_utc() >= send.deletion_date {
                        send.delete(&conn).ok();
                    }
                }
            }

            std::thread::sleep(std::time::Duration::from_secs(3600));
        }
    });
}
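
The scheduler above wakes up once an hour, so a Send can outlive its deletion_date by up to an hour before the sweep removes it. A minimal sketch of how it would be wired up at startup, assuming a `pool: crate::db::DbPool` already exists (the actual call site is not part of this diff):

    // Hypothetical startup wiring; r2d2-style pools are cheaply cloneable.
    start_send_deletion_scheduler(pool.clone());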

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
    pub Type: i32,
    pub Key: String,
    pub Password: Option<String>,
    pub MaxAccessCount: Option<i32>,
    pub ExpirationDate: Option<DateTime<Utc>>,
    pub DeletionDate: DateTime<Utc>,
    pub Disabled: bool,

    // Data field
    pub Name: String,
    pub Notes: Option<String>,
    pub Text: Option<Value>,
    pub File: Option<Value>,
}
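
As a sketch, a request body for the plain-text variant could look like the following, assuming SendType::Text maps to the integer 0 and that the Text object carries the client-encrypted payload (both are assumptions; the SendType enum and the exact Data shape live in the models, not in this file):

{
    "Type": 0,
    "Name": "2.encryptedName|...",
    "Key": "2.encryptedKey|...",
    "Password": null,
    "MaxAccessCount": null,
    "ExpirationDate": null,
    "DeletionDate": "2021-01-01T00:00:00Z",
    "Disabled": false,
    "Notes": null,
    "Text": { "Text": "2.encryptedText|...", "Hidden": false },
    "File": null
}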

/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
/// an org with this policy enabled isn't allowed to create new Sends or
/// modify existing ones, but is allowed to delete them.
///
/// Ref: https://bitwarden.com/help/article/policies/#disable-send
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let policy_type = OrgPolicyType::DisableSend;
    if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }
    Ok(())
}

fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let data_val = if data.Type == SendType::Text as i32 {
        data.Text
    } else if data.Type == SendType::File as i32 {
        data.File
    } else {
        err!("Invalid Send type")
    };

    let data_str = if let Some(mut d) = data_val {
        d.as_object_mut().and_then(|o| o.remove("Response"));
        serde_json::to_string(&d)?
    } else {
        err!("Send data not provided");
    };

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }

    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
    send.user_uuid = Some(user_uuid);
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.atype = data.Type;

    send.set_password(data.Password.as_deref());

    Ok(send)
}

#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
    }

    let mut send = create_send(data, headers.user.uuid.clone())?;
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}

#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let boundary = content_type.params().next().expect("No boundary provided").1;

    let mut mpart = Multipart::with_body(data.open(), boundary);

    // First entry is the SendData JSON
    let mut model_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "model" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;

    // Get the file length and add an extra 10% to avoid issues
    const SIZE_110_MB: u64 = 115_343_360;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            if left <= 0 {
                err!("Attachment size limit reached! Delete some files to open space")
            }
            std::cmp::Ord::max(left as u64, SIZE_110_MB)
        }
        None => SIZE_110_MB,
    };

    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid.clone())?;
    let file_id: String = data_encoding::HEXLOWER.encode(&crate::crypto::get_random(vec![0; 32]));

    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);

    // Read the data entry and save the file
    let mut data_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "data" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let size = match data_entry
        .data
        .save()
        .memory_threshold(0)
        .size_limit(size_limit)
        .with_path(&file_path)
    {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment is not a file: {:?}", other));
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Error: {:?}", e));
        }
    };

    // Set ID and sizes
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(
            String::from("SizeName"),
            Value::String(crate::util::get_display_size(size)),
        );
    }
    send.data = serde_json::to_string(&data_value)?;

    // Save the changes in the database
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}
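
A client-side sketch of exercising this endpoint with reqwest's blocking multipart support; the part names "model" and "data" and their order match what the handler reads above, while the URL, token, and SendData JSON are placeholders:

    use reqwest::blocking::{multipart, Client};

    fn upload_send_file() -> Result<(), Box<dyn std::error::Error>> {
        // "model" carries the SendData JSON; "data" carries the file bytes.
        let form = multipart::Form::new()
            .text("model", r#"{"Type":1,"Name":"...","Key":"...","DeletionDate":"2021-01-01T00:00:00Z","Disabled":false,"File":{}}"#)
            .file("data", "/path/to/file")?;

        Client::new()
            .post("https://vault.example.com/api/sends/file")
            .bearer_auth("<access token>")
            .multipart(form)
            .send()?
            .error_for_status()?;
        Ok(())
    }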

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
    pub Password: Option<String>,
}

#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404);
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    // File access counts are incremented during the download instead
    if send.atype == SendType::Text as i32 {
        send.access_count += 1;
    }

    send.save(&conn)?;

    Ok(Json(send.to_json_access()))
}

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
    send_id: String,
    file_id: String,
    data: JsonUpcase<SendAccessData>,
    host: Host,
    conn: DbConn,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    send.access_count += 1;

    send.save(&conn)?;

    Ok(Json(json!({
        "Object": "send-fileDownload",
        "Id": file_id,
        "Url": format!("{}/sends/{}/{}", &host.host, send_id, file_id)
    })))
}

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    if send.atype != data.Type {
        err!("Sends can't change type")
    }

    // When updating a file Send, we receive nulls in the File field, as it's immutable,
    // so we only need to update the data field in the Text case
    if data.Type == SendType::Text as i32 {
        let data_str = if let Some(mut d) = data.Text {
            d.as_object_mut().and_then(|d| d.remove("Response"));
            serde_json::to_string(&d)?
        } else {
            err!("Send data not provided");
        };
        send.data = data_str;
    }

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }
    send.name = data.Name;
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;

    // Only change the value if it's present
    if let Some(password) = data.Password {
        send.set_password(Some(&password));
    }

    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.delete(&conn)?;
    nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);

    Ok(())
}

#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.set_password(None);
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

src/api/core/two_factor/authenticator.rs
@@ -2,13 +2,16 @@ use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
};

pub use crate::config::CONFIG;
@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
        activate_authenticator_put,
    ]
}

#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;
@@ -53,7 +57,12 @@ struct EnableAuthenticatorData {
}

#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
fn activate_authenticator(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    let data: EnableAuthenticatorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let key = data.Key;
@@ -76,7 +85,7 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
    }

    // Validate the token provided with the key, and save the new twofactor
    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &conn)?;
    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &ip, &conn)?;

    _generate_recover_code(&mut user, &conn);

@@ -88,20 +97,31 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
}

#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_authenticator(data, headers, conn)
fn activate_authenticator_put(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    activate_authenticator(data, headers, ip, conn)
}

pub fn validate_totp_code_str(user_uuid: &str, totp_code: &str, secret: &str, conn: &DbConn) -> EmptyResult {
pub fn validate_totp_code_str(
    user_uuid: &str,
    totp_code: &str,
    secret: &str,
    ip: &ClientIp,
    conn: &DbConn,
) -> EmptyResult {
    let totp_code: u64 = match totp_code.parse() {
        Ok(code) => code,
        _ => err!("TOTP code is not a number"),
    };

    validate_totp_code(user_uuid, totp_code, secret, &conn)
    validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
}

pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &DbConn) -> EmptyResult {
pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
    use oath::{totp_raw_custom_time, HashType};

    let decoded_secret = match BASE32.decode(secret.as_bytes()) {
@@ -143,11 +163,22 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &
            twofactor.save(&conn)?;
            return Ok(());
        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
            warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
            err!(format!("Invalid TOTP code! Server time: {}", current_time.format("%F %T UTC")));
            warn!(
                "This or a TOTP code within {} steps back and forward has already been used!",
                steps
            );
            err!(format!(
                "Invalid TOTP code! Server time: {} IP: {}",
                current_time.format("%F %T UTC"),
                ip.ip
            ));
        }
    }

    // Else no valid code was received, deny access
    err!(format!("Invalid TOTP code! Server time: {}", current_time.format("%F %T UTC")));
    err!(format!(
        "Invalid TOTP code! Server time: {} IP: {}",
        current_time.format("%F %T UTC"),
        ip.ip
    ));
}
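
For context, validate_totp_code above generates codes for a small window of 30-second time steps around the current one and stores the last accepted step in twofactor.last_used, so an older code (or the same code replayed) is rejected. A stripped-down sketch of that replay guard, independent of the oath crate:

    /// Minimal sketch of the replay guard: accept a candidate time step only
    /// if it lies within +/- `steps` of the current step and is newer than
    /// the last step already consumed.
    fn totp_step_is_fresh(candidate_step: i64, last_used_step: i64, current_step: i64, steps: i64) -> bool {
        (candidate_step - current_step).abs() <= steps && candidate_step > last_used_step
    }

    fn current_totp_step(unix_time: i64) -> i64 {
        unix_time / 30 // TOTP default period of 30 seconds
    }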

src/api/core/two_factor/duo.rs
@@ -2,18 +2,18 @@ use chrono::Utc;
use data_encoding::BASE64;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
use crate::{
    api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType, User},
        DbConn,
    },
    error::MapResult,
    CONFIG,
};
use crate::error::MapResult;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![get_duo, activate_duo, activate_duo_put,]
@@ -187,7 +187,7 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
    const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";

    use reqwest::{header::*, Method, blocking::Client};
    use reqwest::{blocking::Client, header::*, Method};
    use std::str::FromStr;

    // https://duo.com/docs/authapi#api-details

src/api/core/two_factor/email.rs
@@ -1,21 +1,18 @@
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
use crate::{
    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
    mail, CONFIG,
};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;

use chrono::{Duration, NaiveDateTime, Utc};
use std::ops::Add;

pub fn routes() -> Vec<Route> {
    routes![get_email, send_email_login, send_email, email,]
@@ -59,7 +56,7 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
/// Generate the token, save the data for later verification and send an email to the user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    let type_ = TwoFactorType::Email as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn).map_res("Two factor not found")?;

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;

@@ -68,7 +65,7 @@ pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    twofactor.data = twofactor_data.to_json();
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

    Ok(())
}
@@ -135,7 +132,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
    );
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

    Ok(())
}
@@ -159,7 +156,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
}

    let type_ = TwoFactorType::EmailVerificationChallenge as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;

    let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -189,7 +186,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
/// Validate the email code when used as a TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(&data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),
@@ -212,7 +209,7 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &

    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
    let max_time = CONFIG.email_expiration_time() as i64;
    if date.add(Duration::seconds(max_time)) < Utc::now().naive_utc() {
    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
        err!("Token has expired")
    }

src/api/core/two_factor/mod.rs
@@ -1,22 +1,23 @@
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;

use crate::api::{JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
use crate::{
    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, User},
        DbConn,
    },
};

pub(crate) mod authenticator;
pub(crate) mod duo;
pub(crate) mod email;
pub(crate) mod u2f;
pub(crate) mod yubikey;
pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
    let mut routes = routes![
@@ -37,15 +38,15 @@ pub fn routes() -> Vec<Route> {
}

#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_list).collect();
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": twofactors_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[post("/two-factor/get-recover", data = "<data>")]

src/api/core/two_factor/u2f.rs
@@ -1,21 +1,26 @@
use once_cell::sync::Lazy;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;
use u2f::{
    messages::{RegisterResponse, SignResponse, U2fSignRequest},
    protocol::{Challenge, U2f},
    register::Registration,
};

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::db::{
use crate::{
    api::{
        core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
        PasswordData,
    },
    auth::Headers,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::Error,
    CONFIG,
};
use crate::error::Error;
use crate::CONFIG;

const U2F_VERSION: &str = "U2F_V2";

@@ -126,12 +131,12 @@ struct RegisterResponseCopy {
    pub error_code: Option<NumberOrString>,
}

impl Into<RegisterResponse> for RegisterResponseCopy {
    fn into(self) -> RegisterResponse {
impl From<RegisterResponseCopy> for RegisterResponse {
    fn from(r: RegisterResponseCopy) -> RegisterResponse {
        RegisterResponse {
            registration_data: self.registration_data,
            version: self.version,
            client_data: self.client_data,
            registration_data: r.registration_data,
            version: r.version,
            client_data: r.client_data,
        }
    }
}

src/api/core/two_factor/yubikey.rs
@@ -1,19 +1,18 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use yubico::config::Config;
use yubico::verify;
use yubico::{config::Config, verify};

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::db::{
use crate::{
    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
    CONFIG,
};
use crate::error::{Error, MapResult};
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]

436 src/api/icons.rs
@@ -1,50 +1,84 @@
use std::{
    collections::HashMap,
    fs::{create_dir_all, remove_file, symlink_metadata, File},
    io::prelude::*,
    net::{IpAddr, ToSocketAddrs},
    sync::RwLock,
    time::{Duration, SystemTime},
};

use once_cell::sync::Lazy;
use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
use std::io::prelude::*;
use std::net::ToSocketAddrs;
use std::time::{Duration, SystemTime};

use rocket::http::ContentType;
use rocket::response::Content;
use rocket::Route;

use reqwest::{Url, header::HeaderMap, blocking::Client, blocking::Response};

use rocket::http::Cookie;

use regex::Regex;
use soup::prelude::*;
use reqwest::{blocking::Client, blocking::Response, header, Url};
use rocket::{http::ContentType, http::Cookie, response::Content, Route};

use crate::error::Error;
use crate::CONFIG;
use crate::util::Cached;
use crate::{error::Error, util::Cached, CONFIG};

pub fn routes() -> Vec<Route> {
    routes![icon]
}

const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");

const ALLOWED_CHARS: &str = "_-.";

static CLIENT: Lazy<Client> = Lazy::new(|| {
    // Generate the default headers
    let mut default_headers = header::HeaderMap::new();
    default_headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15"));
    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en-US,en;q=0.8"));
    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::ACCEPT, header::HeaderValue::from_static("text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8"));

    // Reuse the client between requests
    Client::builder()
        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
        .default_headers(_header_map())
        .default_headers(default_headers)
        .build()
        .unwrap()
});

// Build each Regex only once, since this takes a lot of time.
static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

// Special HashMap which holds the user-defined Regex to speed up matching.
static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));

#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
    if !is_valid_domain(&domain) {
        warn!("Invalid domain: {}", domain);
        return None;
    }

    get_icon(&domain).map(|icon| Cached::ttl(Content(ContentType::new("image", "x-icon"), icon), CONFIG.icon_cache_ttl()))
}

/// Returns whether the domain provided is valid or not.
///
/// This does some manual checks and makes use of Url to do some basic checking.
/// Domains can't be larger than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool {
    // Don't allow empty or too-big domains or path traversal
    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
    // If parsing the domain fails using Url, it will not work with reqwest.
    if let Err(parse_error) = Url::parse(format!("https://{}", domain).as_str()) {
        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
        return false;
    } else if domain.is_empty()
        || domain.contains("..")
        || domain.starts_with('.')
        || domain.starts_with('-')
        || domain.ends_with('-')
    {
        debug!("Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'", domain);
        return false;
    } else if domain.len() > 255 {
        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
        return false;
    }

    // Only alphanumeric or specific characters
    for c in domain.chars() {
        if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
            debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
            return false;
        }
    }
@@ -52,25 +86,112 @@ fn is_valid_domain(domain: &str) -> bool {
    true
}
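
A few illustrative spot checks for the rules above (expected outcomes, not tests taken from the repository):

    assert!(is_valid_domain("example.com"));
    assert!(is_valid_domain("sub.example-site.org"));
    assert!(!is_valid_domain(""));             // empty
    assert!(!is_valid_domain("example..com")); // contains ".."
    assert!(!is_valid_domain("-example.com")); // starts with '-'
    assert!(!is_valid_domain("exa mple.com")); // invalid character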

#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
    let icon_type = ContentType::new("image", "x-icon");

    if !is_valid_domain(&domain) {
        warn!("Invalid domain: {:#?}", domain);
        return Cached::long(Content(icon_type, FALLBACK_ICON.to_vec()));
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(not(feature = "unstable"))]
fn is_global(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(ip) => {
            // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
            // globally routable addresses in the 192.0.0.0/24 range.
            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
                return true;
            }
            !ip.is_private()
                && !ip.is_loopback()
                && !ip.is_link_local()
                && !ip.is_broadcast()
                && !ip.is_documentation()
                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
                // Make sure the address is not in 0.0.0.0/8
                && ip.octets()[0] != 0
        }
        IpAddr::V6(ip) => {
            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
                true
            } else {
                !ip.is_multicast()
                    && !ip.is_loopback()
                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
                    && !ip.is_unspecified()
                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
            }
        }
    }
}

    Cached::long(Content(icon_type, get_icon(&domain)))
}

fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
#[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
    ip.is_global()
}
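
A couple of worked examples against the bit masks above; the RFC 6598 shared address space 100.64.0.0/10 is what `octets()[1] & 0b1100_0000 == 0b0100_0000` matches (these asserts are illustrative, not from the repository's test suite):

    use std::net::{IpAddr, Ipv4Addr};

    assert!(is_global(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1))));     // ordinary public address
    assert!(is_global(IpAddr::V4(Ipv4Addr::new(192, 0, 0, 9))));   // one of the two routable 192.0.0.0/24 hosts
    assert!(!is_global(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))));   // RFC 1918 private
    assert!(!is_global(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 1)))); // RFC 6598 shared space
    assert!(!is_global(IpAddr::V4(Ipv4Addr::new(0, 1, 2, 3))));    // 0.0.0.0/8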

/// These are some tests to check that the implementations match.
/// The IPv4 addresses can all be checked in 5 minutes or so, and they are correct as of nightly 2020-07-11.
/// The IPv6 addresses can't be checked in a reasonable time, so we check about ten billion random ones, so far correct.
/// Note that the is_global implementation is subject to change as new IP RFCs are created.
///
/// To run while showing progress output:
/// cargo test --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
    use super::*;

    #[test]
    #[ignore]
    fn test_ipv4_global() {
        for a in 0..u8::MAX {
            println!("Iter: {}/255", a);
            for b in 0..u8::MAX {
                for c in 0..u8::MAX {
                    for d in 0..u8::MAX {
                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
                        assert_eq!(ip.is_global(), is_global(ip))
                    }
                }
            }
        }
    }

    #[test]
    #[ignore]
    fn test_ipv6_global() {
        use ring::rand::{SecureRandom, SystemRandom};
        let mut v = [0u8; 16];
        let rand = SystemRandom::new();
        for i in 0..1_000 {
            println!("Iter: {}/1_000", i);
            for _ in 0..10_000_000 {
                rand.fill(&mut v).expect("Error generating random values");
                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
                    (v[14] as u16) << 8 | v[15] as u16,
                    (v[12] as u16) << 8 | v[13] as u16,
                    (v[10] as u16) << 8 | v[11] as u16,
                    (v[8] as u16) << 8 | v[9] as u16,
                    (v[6] as u16) << 8 | v[7] as u16,
                    (v[4] as u16) << 8 | v[5] as u16,
                    (v[2] as u16) << 8 | v[3] as u16,
                    (v[0] as u16) << 8 | v[1] as u16,
                ));
                assert_eq!(ip.is_global(), is_global(ip))
            }
        }
    }
}

fn is_domain_blacklisted(domain: &str) -> bool {
    let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
        && (domain, 0)
            .to_socket_addrs()
            .map(|x| {
                for ip_port in x {
                    if !ip_port.ip().is_global() {
                    if !is_global(ip_port.ip()) {
                        warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
                        return true;
                    }
@@ -82,7 +203,31 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
    // Skip the regex check if the previous one is true already
    if !is_blacklisted {
        if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
            let regex = Regex::new(&blacklist).expect("Valid Regex");
            let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();

            // Use the pre-generated Regex stored in the Lazy HashMap if there is one, else generate it.
            let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
                regex
            } else {
                drop(regex_hashmap);

                let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
                // Clear the current list if the previous key doesn't exist.
                // This prevents the HashMap from growing after someone changes the regex via the admin interface.
                if regex_hashmap_write.len() >= 1 {
                    regex_hashmap_write.clear();
                }

                // Generate the regex to store in the Lazy Static HashMap.
                let blacklist_regex = Regex::new(&blacklist).unwrap();
                regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
                drop(regex_hashmap_write);

                regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
                regex_hashmap.get(&blacklist).unwrap()
            };

            // Use the pre-generated Regex stored in the Lazy HashMap.
            if regex.is_match(&domain) {
                warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
                is_blacklisted = true;
@@ -93,39 +238,38 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
    is_blacklisted
}

fn get_icon(domain: &str) -> Vec<u8> {
fn get_icon(domain: &str) -> Option<Vec<u8>> {
    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

    // Check for expiration of a negatively cached copy
    if icon_is_negcached(&path) {
        return None;
    }

    if let Some(icon) = get_cached_icon(&path) {
        return icon;
        return Some(icon);
    }

    if CONFIG.disable_icon_download() {
        return FALLBACK_ICON.to_vec();
        return None;
    }

    // Get the icon, or fallback in case of error
    // Get the icon, or None in case of error
    match download_icon(&domain) {
        Ok(icon) => {
            save_icon(&path, &icon);
            icon
            Some(icon)
        }
        Err(e) => {
            error!("Error downloading icon: {:?}", e);
            let miss_indicator = path + ".miss";
            let empty_icon = Vec::new();
            save_icon(&miss_indicator, &empty_icon);
            FALLBACK_ICON.to_vec()
            save_icon(&miss_indicator, &[]);
            None
        }
    }
}

fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
    // Check for expiration of a negatively cached copy
    if icon_is_negcached(path) {
        return Some(FALLBACK_ICON.to_vec());
    }

    // Check for expiration of a successfully cached copy
    if icon_is_expired(path) {
        return None;
@@ -182,11 +326,55 @@ struct Icon {
}

impl Icon {
    fn new(priority: u8, href: String) -> Self {
    const fn new(priority: u8, href: String) -> Self {
        Self { href, priority }
    }
}

fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &Url) {
    if let markup5ever_rcdom::NodeData::Element { name, attrs, .. } = &node.data {
        if name.local.as_ref() == "link" {
            let mut has_rel = false;
            let mut href = None;
            let mut sizes = None;

            let attrs = attrs.borrow();
            for attr in attrs.iter() {
                let attr_name = attr.name.local.as_ref();
                let attr_value = attr.value.as_ref();

                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) {
                    has_rel = true;
                } else if attr_name == "href" {
                    href = Some(attr_value);
                } else if attr_name == "sizes" {
                    sizes = Some(attr_value);
                }
            }

            if has_rel {
                if let Some(inner_href) = href {
                    if let Ok(full_href) = url.join(&inner_href).map(|h| h.into_string()) {
                        let priority = get_icon_priority(&full_href, sizes);
                        icons.push(Icon::new(priority, full_href));
                    }
                }
            }
        }
    }

    // TODO: Might want to limit the recursion depth?
    for child in node.children.borrow().iter() {
        get_favicons_node(child, icons, url);
    }
}

struct IconUrlResult {
    iconlist: Vec<Icon>,
    cookies: String,
    referer: String,
}

/// Returns a Result holding a Vec of icons and a String with the cookies from the last response.
/// There will always be a result with a String containing https://example.com/favicon.ico and an empty String for the cookies.
/// This does not mean that that location exists, but it is the default location browsers use.
@@ -199,24 +387,65 @@ impl Icon {
/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
/// ```
fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    // Default URL with secure and insecure schemes
    let ssldomain = format!("https://{}", domain);
    let httpdomain = format!("http://{}", domain);

    // First check the domain as given during the request for both HTTPS and HTTP.
    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
        Ok(c) => Ok(c),
        Err(e) => {
            let mut sub_resp = Err(e);

            // When the domain is not an IP and has more than one dot, remove all subdomains.
            let is_ip = domain.parse::<IpAddr>();
            if is_ip.is_err() && domain.matches('.').count() > 1 {
                let mut domain_parts = domain.split('.');
                let base_domain = format!(
                    "{base}.{tld}",
                    tld = domain_parts.next_back().unwrap(),
                    base = domain_parts.next_back().unwrap()
                );
                if is_valid_domain(&base_domain) {
                    let sslbase = format!("https://{}", base_domain);
                    let httpbase = format!("http://{}", base_domain);
                    debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);

                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
                }

            // When the domain is not an IP and has fewer than 2 dots, try to add www. in front of it.
            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
                let www_domain = format!("www.{}", domain);
                if is_valid_domain(&www_domain) {
                    let sslwww = format!("https://{}", www_domain);
                    let httpwww = format!("http://{}", www_domain);
                    debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);

                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
                }
            }

            sub_resp
        }
    };

    // Create the iconlist
    let mut iconlist: Vec<Icon> = Vec::new();

    // Create the cookie_str to hold all the cookies from the response.
    // These cookies can be used to request/download the favicon image.
    // Some sites have extra security in place, for example XSRF tokens.
    let mut cookie_str = String::new();
    let mut cookie_str = "".to_string();
    let mut referer = "".to_string();

    let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain));
    if let Ok(mut content) = resp {
    if let Ok(content) = resp {
        // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
        let url = content.url().clone();

        // Get all the cookies and pass them on to the next function.
        // Needed for XSRF cookies, for example (like @ mijn.ing.nl)
        let raw_cookies = content.headers().get_all("set-cookie");
        cookie_str = raw_cookies
            .iter()
@@ -230,31 +459,23 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
            })
            .collect::<String>();

        // Set the referer to be used on the final request; some sites check this.
        // Mostly used to prevent direct linking and for other security reasons.
        referer = url.as_str().to_string();

        // Add the default favicon.ico to the list with the domain the content responded from.
        iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));

        // 512KB should be more than enough for the HTML, though as we only really need
        // the HTML header, it could potentially be reduced even further
        let limited_reader = crate::util::LimitedReader::new(&mut content, 512 * 1024);
        let mut limited_reader = content.take(512 * 1024);

        let soup = Soup::from_reader(limited_reader)?;
        // Search for and filter
        let favicons = soup
            .tag("link")
            .attr("rel", Regex::new(r"icon$|apple.*icon")?) // Only use icon rels
            .attr("href", Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64")?) // Only allow specific extensions
            .find_all();
        use html5ever::tendril::TendrilSink;
        let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
            .from_utf8()
            .read_from(&mut limited_reader)?;

        // Loop through all the found icons and determine their priority
        for favicon in favicons {
            let sizes = favicon.get("sizes");
            let href = favicon.get("href").expect("Missing href");
            let full_href = url.join(&href).unwrap().into_string();

            let priority = get_icon_priority(&full_href, sizes);

            iconlist.push(Icon::new(priority, full_href))
        }
        get_favicons_node(&dom.document, &mut iconlist, &url);
    } else {
        // Add the default favicon.ico to the list with just the given domain
        iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
@@ -265,28 +486,33 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
    iconlist.sort_by_key(|x| x.priority);

    // There is always an icon in the list, so no need to check if it exists; just return the first one
    Ok((iconlist, cookie_str))
    Ok(IconUrlResult {
        iconlist,
        cookies: cookie_str,
        referer,
    })
}
|
||||
|
||||
fn get_page(url: &str) -> Result<Response, Error> {
|
||||
get_page_with_cookies(url, "")
|
||||
get_page_with_cookies(url, "", "")
|
||||
}
|
||||
|
||||
fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
|
||||
if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
|
||||
err!("Favicon rel linked to a non blacklisted domain!");
|
||||
fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<Response, Error> {
|
||||
if is_domain_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
|
||||
err!("Favicon rel linked to a blacklisted domain!");
|
||||
}
|
||||
|
||||
if cookie_str.is_empty() {
|
||||
CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
|
||||
} else {
|
||||
CLIENT
|
||||
.get(url)
|
||||
.header("cookie", cookie_str)
|
||||
.send()?
|
||||
let mut client = CLIENT.get(url);
|
||||
if !cookie_str.is_empty() {
|
||||
client = client.header("Cookie", cookie_str)
|
||||
}
|
||||
if !referer.is_empty() {
|
||||
client = client.header("Referer", referer)
|
||||
}
|
||||
|
||||
client.send()?
|
||||
.error_for_status()
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a Integer with the priority of the type of the icon which to prefer.
|
||||
@@ -301,7 +527,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error>
|
||||
/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
|
||||
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
|
||||
/// ```
|
||||
fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
|
||||
fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
|
||||
// Check if there is a dimension set
|
||||
let (width, height) = parse_sizes(sizes);
|
||||
|
||||
@@ -314,7 +540,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
|
||||
1
|
||||
} else if width == 64 {
|
||||
2
|
||||
} else if width >= 24 && width <= 128 {
|
||||
} else if (24..=128).contains(&width) {
|
||||
3
|
||||
} else if width == 16 {
|
||||
4
|
||||
@@ -349,12 +575,12 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
|
||||
/// let (width, height) = parse_sizes("x128x128"); // (128, 128)
|
||||
/// let (width, height) = parse_sizes("32"); // (0, 0)
|
||||
/// ```
|
||||
fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
|
||||
fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
|
||||
let mut width: u16 = 0;
|
||||
let mut height: u16 = 0;
|
||||
|
||||
if let Some(sizes) = sizes {
|
||||
match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
|
||||
match ICON_SIZE_REGEX.captures(sizes.trim()) {
|
||||
None => {}
|
||||
Some(dimensions) => {
|
||||
if dimensions.len() >= 3 {
|
||||
@@ -369,17 +595,17 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
|
||||
}
|
||||
|
||||
fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
|
||||
if check_icon_domain_is_blacklisted(domain) {
|
||||
if is_domain_blacklisted(domain) {
|
||||
err!("Domain is blacklisted", domain)
|
||||
}
|
||||
|
||||
let (iconlist, cookie_str) = get_icon_url(&domain)?;
|
||||
let icon_result = get_icon_url(&domain)?;
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
|
||||
use data_url::DataUrl;
|
||||
|
||||
for icon in iconlist.iter().take(5) {
|
||||
for icon in icon_result.iconlist.iter().take(5) {
|
||||
if icon.href.starts_with("data:image") {
|
||||
let datauri = DataUrl::process(&icon.href).unwrap();
|
||||
// Check if we are able to decode the data uri
|
||||
@@ -394,13 +620,13 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
|
||||
_ => warn!("data uri is invalid"),
|
||||
};
|
||||
} else {
|
||||
match get_page_with_cookies(&icon.href, &cookie_str) {
|
||||
match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
|
||||
Ok(mut res) => {
|
||||
info!("Downloaded icon from {}", icon.href);
|
||||
res.copy_to(&mut buffer)?;
|
||||
break;
|
||||
}
|
||||
Err(_) => info!("Download failed for {}", icon.href),
|
||||
},
|
||||
_ => warn!("Download failed for {}", icon.href),
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -425,25 +651,3 @@ fn save_icon(path: &str, icon: &[u8]) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn _header_map() -> HeaderMap {
|
||||
// Set some default headers for the request.
|
||||
// Use a browser like user-agent to make sure most websites will return there correct website.
|
||||
use reqwest::header::*;
|
||||
|
||||
macro_rules! headers {
|
||||
($( $name:ident : $value:literal),+ $(,)? ) => {
|
||||
let mut headers = HeaderMap::new();
|
||||
$( headers.insert($name, HeaderValue::from_static($value)); )+
|
||||
headers
|
||||
};
|
||||
}
|
||||
|
||||
headers! {
|
||||
USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
|
||||
ACCEPT_LANGUAGE: "en-US,en;q=0.8",
|
||||
CACHE_CONTROL: "no-cache",
|
||||
PRAGMA: "no-cache",
|
||||
ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
|
||||
}
|
||||
}
|
||||
|
src/api/identity.rs

@@ -1,19 +1,22 @@
-use chrono::Utc;
+use chrono::Local;
 use num_traits::FromPrimitive;
-use rocket::request::{Form, FormItems, FromForm};
-use rocket::Route;
+use rocket::{
+    request::{Form, FormItems, FromForm},
+    Route,
+};
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::api::core::two_factor::email::EmailTokenData;
-use crate::api::core::two_factor::{duo, email, yubikey};
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::ClientIp;
-use crate::db::models::*;
-use crate::db::DbConn;
-use crate::mail;
-use crate::util;
-use crate::CONFIG;
+use crate::{
+    api::{
+        core::two_factor::{duo, email, email::EmailTokenData, yubikey},
+        ApiResult, EmptyResult, JsonResult,
+    },
+    auth::ClientIp,
+    db::{models::*, DbConn},
+    error::MapResult,
+    mail, util, CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     routes![login]

@@ -38,7 +41,7 @@ fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
             _check_is_some(&data.device_name, "device_name cannot be blank")?;
             _check_is_some(&data.device_type, "device_type cannot be blank")?;

-            _password_login(data, conn, ip)
+            _password_login(data, conn, &ip)
         }
         t => err!("Invalid type", t),
     }

@@ -49,10 +52,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
     let token = data.refresh_token.unwrap();

     // Get device by refresh token
-    let mut device = match Device::find_by_refresh_token(&token, &conn) {
-        Some(device) => device,
-        None => err!("Invalid refresh token"),
-    };
+    let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;

     // COMMON
     let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
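The map_res refactor above (enabled by the new error::MapResult import) collapses a four-line Option match into one chained call. A minimal sketch of such an extension trait, under the assumption that the project's real MapResult likewise attaches a message when the value is absent (simplified error type here, not the crate's):

trait MapResult<T> {
    fn map_res(self, msg: &str) -> Result<T, String>;
}

impl<T> MapResult<T> for Option<T> {
    // Turn None into an error carrying a human-readable message.
    fn map_res(self, msg: &str) -> Result<T, String> {
        self.ok_or_else(|| msg.to_string())
    }
}

With that in scope, find_device(token).map_res("Invalid refresh token")? reads as "unwrap, or fail the request with this message".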
@@ -68,10 +68,15 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
         "refresh_token": device.refresh_token,
         "Key": user.akey,
         "PrivateKey": user.private_key,
+
+        "Kdf": user.client_kdf_type,
+        "KdfIterations": user.client_kdf_iter,
+        "ResetMasterPassword": false, // TODO: according to the official server this seems to be something like user.password_hash.is_empty(), but it would need testing
+        "scope": "api offline_access"
     })))
 }

-fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult {
+fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
     // Validate scope
     let scope = data.scope.as_ref().unwrap();
     if scope != "api offline_access" {

@@ -97,8 +102,18 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
         )
     }

+    // Check if the user is disabled
+    if !user.enabled {
+        err!(
+            "This user has been disabled",
+            format!("IP: {}. Username: {}.", ip.ip, username)
+        )
+    }
+
+    let now = Local::now();
+
     if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
-        let now = Utc::now().naive_utc();
+        let now = now.naive_utc();
         if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
             let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
             if resend_limit == 0 || user.login_verify_count < resend_limit {

@@ -127,10 +142,10 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult

     let (mut device, new_device) = get_device(&data, &conn, &user);

-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;
+    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &ip, &conn)?;

     if CONFIG.mail_enabled() && new_device {
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
             error!("Error sending new device email: {:#?}", e);

             if CONFIG.require_device_email() {

@@ -140,7 +155,6 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
     }

     // Common
-    let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);

     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);

@@ -154,6 +168,11 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
         "Key": user.akey,
         "PrivateKey": user.private_key,
         //"TwoFactorToken": "11122233333444555666777888999"
+
+        "Kdf": user.client_kdf_type,
+        "KdfIterations": user.client_kdf_iter,
+        "ResetMasterPassword": false, // TODO: same as above
+        "scope": "api offline_access"
     });

     if let Some(token) = twofactor_token {

@@ -197,6 +216,7 @@ fn twofactor_auth(
     user_uuid: &str,
     data: &ConnectData,
     device: &mut Device,
+    ip: &ClientIp,
     conn: &DbConn,
 ) -> ApiResult<Option<String>> {
     let twofactors = TwoFactor::find_by_user(user_uuid, conn);

@@ -216,8 +236,7 @@ fn twofactor_auth(

     let selected_twofactor = twofactors
         .into_iter()
-        .filter(|tf| tf.atype == selected_id && tf.enabled)
-        .nth(0);
+        .find(|tf| tf.atype == selected_id && tf.enabled);
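Replacing .filter(...).nth(0) with .find(...) is behavior-preserving: both yield the first element matching the predicate, but find short-circuits without building a second iterator adapter and states the intent directly (clippy's iter_nth_zero and filter_next lints push in the same direction). A quick illustration:

fn main() {
    let nums = [1, 3, 4, 6];
    // Equivalent results; `find` stops walking at the first match.
    let a = nums.iter().filter(|n| **n % 2 == 0).nth(0);
    let b = nums.iter().find(|n| **n % 2 == 0);
    assert_eq!(a, b); // both Some(&4)
}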

     use crate::api::core::two_factor as _tf;
     use crate::crypto::ct_eq;

@@ -226,7 +245,7 @@ fn twofactor_auth(
     let mut remember = data.two_factor_remember.unwrap_or(0);

     match TwoFactorType::from_i32(selected_id) {
-        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
+        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?,
         Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
         Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
         Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,

@@ -252,10 +271,7 @@
 }

 fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
-    match tf {
-        Some(tf) => Ok(tf.data),
-        None => err!("Two factor doesn't exist"),
-    }
+    tf.map(|t| t.data).map_res("Two factor doesn't exist")
 }

 fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
src/api/mod.rs

@@ -1,27 +1,30 @@
 mod admin;
-pub(crate) mod core;
+pub mod core;
 mod icons;
 mod identity;
 mod notifications;
 mod web;

-pub use self::admin::routes as admin_routes;
-pub use self::core::routes as core_routes;
-pub use self::icons::routes as icons_routes;
-pub use self::identity::routes as identity_routes;
-pub use self::notifications::routes as notifications_routes;
-pub use self::notifications::{start_notification_server, Notify, UpdateType};
-pub use self::web::routes as web_routes;
-
 use rocket_contrib::json::Json;
 use serde_json::Value;

+pub use crate::api::{
+    admin::routes as admin_routes,
+    core::routes as core_routes,
+    core::start_send_deletion_scheduler,
+    icons::routes as icons_routes,
+    identity::routes as identity_routes,
+    notifications::routes as notifications_routes,
+    notifications::{start_notification_server, Notify, UpdateType},
+    web::routes as web_routes,
+};
+use crate::util;

 // Type aliases for API methods results
 type ApiResult<T> = Result<T, crate::error::Error>;
 pub type JsonResult = ApiResult<Json<Value>>;
 pub type EmptyResult = ApiResult<()>;

-use crate::util;
 type JsonUpcase<T> = Json<util::UpCase<T>>;
 type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
src/api/notifications.rs

@@ -4,11 +4,12 @@ use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value as JsonValue;

-use crate::api::{EmptyResult, JsonResult};
-use crate::auth::Headers;
-use crate::db::DbConn;
-
-use crate::{Error, CONFIG};
+use crate::{
+    api::EmptyResult,
+    auth::Headers,
+    db::DbConn,
+    Error, CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     routes![negotiate, websockets_err]

@@ -18,18 +19,19 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

 #[get("/hub")]
 fn websockets_err() -> EmptyResult {
-    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_and_swap(true, false, Ordering::Relaxed) {
-        err!("###########################################################
+    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok() {
+        err!("
+###########################################################
     '/notifications/hub' should be proxied to the websocket server or notifications won't work.
     Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
-    ###########################################################################################")
+    ###########################################################################################\n")
     } else {
         Err(Error::empty())
     }
 }
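The compare_and_swap call above was deprecated in Rust 1.50 in favor of compare_exchange, which takes separate success/failure orderings and returns a Result; chaining .is_ok() reproduces the old "true only for the caller that flipped it" behavior. A standalone sketch of the same print-once gate:

use std::sync::atomic::{AtomicBool, Ordering};

static SHOW_MSG: AtomicBool = AtomicBool::new(true);

fn warn_once() {
    // Only the first caller observes Ok (true -> false); everyone else gets Err.
    if SHOW_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok() {
        eprintln!("'/notifications/hub' is not proxied; websockets will not work.");
    }
}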

 #[post("/hub/negotiate")]
-fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
+fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
     use crate::crypto;
     use data_encoding::BASE64URL;

@@ -45,10 +47,10 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
     // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
     // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
     // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
-    Ok(Json(json!({
+    Json(json!({
         "connectionId": conn_id,
         "availableTransports": available_transports
-    })))
+    }))
 }

 //

@@ -118,7 +120,7 @@ fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
 }

 // Server WebSocket handler
-pub struct WSHandler {
+pub struct WsHandler {
     out: Sender,
     user_uuid: Option<String>,
     users: WebSocketUsers,

@@ -136,10 +138,9 @@ struct InitialMessage {
 const PING_MS: u64 = 15_000;
 const PING: Token = Token(1);

-const ID_KEY: &str = "id=";
 const ACCESS_TOKEN_KEY: &str = "access_token=";

-impl WSHandler {
+impl WsHandler {
     fn err(&self, msg: &'static str) -> ws::Result<()> {
         self.out.close(ws::CloseCode::Invalid)?;

@@ -147,42 +148,50 @@ impl WSHandler {
         let io_error = io::Error::from(io::ErrorKind::InvalidData);
         Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg))
     }
+
+    fn get_request_token(&self, hs: Handshake) -> Option<String> {
+        use std::str::from_utf8;
+
+        // Verify we have a token header
+        if let Some(header_value) = hs.request.header("Authorization") {
+            if let Ok(converted) = from_utf8(header_value) {
+                if let Some(token_part) = converted.split("Bearer ").nth(1) {
+                    return Some(token_part.into());
+                }
+            }
+        };
+
+        // Otherwise verify the query parameter value
+        let path = hs.request.resource();
+        if let Some(params) = path.split('?').nth(1) {
+            let params_iter = params.split('&').take(1);
+            for val in params_iter {
+                if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
+                    return Some(stripped.into());
+                }
+            }
+        };
+
+        None
+    }
 }

-impl Handler for WSHandler {
+impl Handler for WsHandler {
     fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
-        // Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
-        //
-        // We don't use `id`, and as of around 2020-03-25, the official clients
-        // no longer seem to pass `id` (only `access_token`).
-        let path = hs.request.resource();
-
-        let (_id, access_token) = match path.split('?').nth(1) {
-            Some(params) => {
-                let mut params_iter = params.split('&').take(2);
-
-                let mut id = None;
-                let mut access_token = None;
-                while let Some(val) = params_iter.next() {
-                    if val.starts_with(ID_KEY) {
-                        id = Some(&val[ID_KEY.len()..]);
-                    } else if val.starts_with(ACCESS_TOKEN_KEY) {
-                        access_token = Some(&val[ACCESS_TOKEN_KEY.len()..]);
-                    }
-                }
-
-                match (id, access_token) {
-                    (Some(a), Some(b)) => (a, b),
-                    (None, Some(b)) => ("", b), // Ignore missing `id`.
-                    _ => return self.err("Missing access token"),
-                }
-            }
-            None => return self.err("Missing query parameters"),
-        };
+        // Get user token from header or query parameter
+        let access_token = match self.get_request_token(hs) {
+            Some(token) => token,
+            _ => return self.err("Missing access token"),
+        };

         // Validate the user
         use crate::auth;
-        let claims = match auth::decode_login(access_token) {
+        let claims = match auth::decode_login(access_token.as_str()) {
             Ok(claims) => claims,
             Err(_) => return self.err("Invalid access token provided"),
         };
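Note the two-tier token lookup get_request_token introduces: an Authorization: Bearer header wins, and only then is the access_token= query parameter consulted (clients that cannot set headers on a websocket upgrade fall back to the URL). A tiny sketch of the same precedence rule over plain strings (hypothetical inputs, not the ws handshake types):

fn token_from(header: Option<&str>, query: &str) -> Option<String> {
    // 1. Prefer "Bearer <token>" from the Authorization header.
    if let Some(tok) = header.and_then(|h| h.strip_prefix("Bearer ")) {
        return Some(tok.to_string());
    }
    // 2. Fall back to an access_token= pair in the query string.
    query.split('&').find_map(|kv| kv.strip_prefix("access_token=")).map(String::from)
}

fn main() {
    assert_eq!(token_from(Some("Bearer abc"), ""), Some("abc".into()));
    assert_eq!(token_from(None, "id=1&access_token=xyz"), Some("xyz".into()));
}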

@@ -231,13 +240,13 @@ impl Handler for WSHandler {
     }
 }

-struct WSFactory {
+struct WsFactory {
     pub users: WebSocketUsers,
 }

-impl WSFactory {
+impl WsFactory {
     pub fn init() -> Self {
-        WSFactory {
+        WsFactory {
             users: WebSocketUsers {
                 map: Arc::new(CHashMap::new()),
             },

@@ -245,11 +254,11 @@ impl WSFactory {
     }
 }

-impl Factory for WSFactory {
-    type Handler = WSHandler;
+impl Factory for WsFactory {
+    type Handler = WsHandler;

     fn connection_made(&mut self, out: Sender) -> Self::Handler {
-        WSHandler {
+        WsHandler {
             out,
             user_uuid: None,
             users: self.users.clone(),

@@ -260,7 +269,9 @@ impl Factory for WSFactory {
         // Remove handler
         if let Some(user_uuid) = &handler.user_uuid {
             if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
-                user_conn.remove_item(&handler.out);
+                if let Some(pos) = user_conn.iter().position(|x| x == &handler.out) {
+                    user_conn.remove(pos);
+                }
             }
         }
     }
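The unstable Vec::remove_item never reached stable Rust and was eventually deleted from the standard library, so the stable replacement is exactly this position-then-remove dance. A standalone illustration:

fn remove_first<T: PartialEq>(v: &mut Vec<T>, item: &T) -> Option<T> {
    // position() finds the index of the first equal element; remove() shifts the tail left.
    let pos = v.iter().position(|x| x == item)?;
    Some(v.remove(pos))
}

fn main() {
    let mut v = vec![1, 2, 3, 2];
    remove_first(&mut v, &2);
    assert_eq!(v, [1, 3, 2]); // only the first match is removed
}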

@@ -331,7 +342,7 @@ impl WebSocketUsers {
 /* Message Structure
 [
     1, // MessageType.Invocation
-    {}, // Headers
+    {}, // Headers (map)
     null, // InvocationId
     "ReceiveMessage", // Target
     [ // Arguments

@@ -348,7 +359,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {

     let value = V::Array(vec![
         1.into(),
-        V::Array(vec![]),
+        V::Map(vec![]),
         V::Nil,
         "ReceiveMessage".into(),
         V::Array(vec![V::Map(vec![

@@ -383,6 +394,10 @@ pub enum UpdateType {

     LogOut = 11,

+    SyncSendCreate = 12,
+    SyncSendUpdate = 13,
+    SyncSendDelete = 14,
+
     None = 100,
 }

@@ -390,15 +405,17 @@ use rocket::State;
 pub type Notify<'a> = State<'a, WebSocketUsers>;

 pub fn start_notification_server() -> WebSocketUsers {
-    let factory = WSFactory::init();
+    let factory = WsFactory::init();
     let users = factory.users.clone();

     if CONFIG.websocket_enabled() {
         thread::spawn(move || {
-            let mut settings = ws::Settings::default();
-            settings.max_connections = 500;
-            settings.queue_size = 2;
-            settings.panic_on_internal = false;
+            let settings = ws::Settings {
+                max_connections: 500,
+                queue_size: 2,
+                panic_on_internal: false,
+                ..Default::default()
+            };

             ws::Builder::new()
                 .with_settings(settings)
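The settings rewrite swaps four mutating statements for functional record update syntax: name the fields you care about and let ..Default::default() fill in the rest, which keeps the binding immutable and compiles to the same thing. The shape of the pattern, with a stand-in config struct:

#[derive(Debug)]
struct Settings {
    max_connections: usize,
    queue_size: usize,
    panic_on_internal: bool,
    ping_interval: u64,
}

impl Default for Settings {
    fn default() -> Self {
        Settings { max_connections: 100, queue_size: 5, panic_on_internal: true, ping_interval: 15_000 }
    }
}

fn main() {
    // Override three fields; ping_interval comes from Default.
    let s = Settings { max_connections: 500, queue_size: 2, panic_on_internal: false, ..Default::default() };
    println!("{:?}", s);
}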
src/api/web.rs

@@ -1,21 +1,16 @@
 use std::path::{Path, PathBuf};

-use rocket::http::ContentType;
-use rocket::response::content::Content;
-use rocket::response::NamedFile;
-use rocket::Route;
+use rocket::{http::ContentType, response::content::Content, response::NamedFile, Route};
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::error::Error;
-use crate::util::Cached;
-use crate::CONFIG;
+use crate::{error::Error, util::Cached, CONFIG};

 pub fn routes() -> Vec<Route> {
     // If adding more routes here, consider also adding them to
     // crate::utils::LOGGED_ROUTES to make sure they appear in the log
     if CONFIG.web_vault_enabled() {
-        routes![web_index, app_id, web_files, attachments, alive, static_files]
+        routes![web_index, app_id, web_files, attachments, sends, alive, static_files]
     } else {
         routes![attachments, alive, static_files]
     }

@@ -65,6 +60,11 @@ fn attachments(uuid: String, file: PathBuf) -> Option<NamedFile> {
     NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file)).ok()
 }

+#[get("/sends/<send_id>/<file_id>")]
+fn sends(send_id: String, file_id: String) -> Option<NamedFile> {
+    NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok()
+}
+
 #[get("/alive")]
 fn alive() -> Json<String> {
     use crate::util::format_date;

@@ -78,13 +78,16 @@ fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
     match filename.as_ref() {
         "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
         "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
         "shield-white.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/shield-white.png"))),
+        "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
         "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),

         "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
-        "bootstrap-native-v4.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native-v4.js"))),
-        "md5.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/md5.js"))),
+        "bootstrap-native.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
         "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
+        "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
+        "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
+        "jquery-3.5.1.slim.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.5.1.slim.js"))),
         _ => err!(format!("Static file not found: {}", filename)),
     }
 }
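static_files above compiles every asset into the binary with include_bytes!, so a whitelist match on the filename is all that stands between a request and the embedded bytes; anything not listed is a hard error rather than a filesystem probe. A minimal sketch of the technique outside Rocket (the asset paths are hypothetical and must exist at compile time for include_bytes! to resolve):

// Embed assets at compile time; each &'static [u8] lives in the binary itself.
fn static_file(name: &str) -> Option<(&'static str, &'static [u8])> {
    match name {
        "logo.png" => Some(("image/png", include_bytes!("logo.png"))),
        "app.css" => Some(("text/css", include_bytes!("app.css"))),
        _ => None, // unknown names never touch the filesystem
    }
}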
src/auth.rs

@@ -1,17 +1,19 @@
 //
 // JWT Handling
 //
-use crate::util::read_file;
 use chrono::{Duration, Utc};
-use once_cell::sync::Lazy;
 use num_traits::FromPrimitive;
+use once_cell::sync::Lazy;

-use jsonwebtoken::{self, Algorithm, Header, EncodingKey, DecodingKey};
+use jsonwebtoken::{self, Algorithm, DecodingKey, EncodingKey, Header};
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;

-use crate::error::{Error, MapResult};
-use crate::CONFIG;
+use crate::{
+    error::{Error, MapResult},
+    util::read_file,
+    CONFIG,
+};

 const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

@@ -56,28 +58,28 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
     .map_res("Error decoding JWT")
 }

-pub fn decode_login(token: &str) -> Result<LoginJWTClaims, Error> {
+pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
     decode_jwt(token, JWT_LOGIN_ISSUER.to_string())
 }

-pub fn decode_invite(token: &str) -> Result<InviteJWTClaims, Error> {
+pub fn decode_invite(token: &str) -> Result<InviteJwtClaims, Error> {
     decode_jwt(token, JWT_INVITE_ISSUER.to_string())
 }

-pub fn decode_delete(token: &str) -> Result<DeleteJWTClaims, Error> {
+pub fn decode_delete(token: &str) -> Result<DeleteJwtClaims, Error> {
     decode_jwt(token, JWT_DELETE_ISSUER.to_string())
 }

-pub fn decode_verify_email(token: &str) -> Result<VerifyEmailJWTClaims, Error> {
+pub fn decode_verify_email(token: &str) -> Result<VerifyEmailJwtClaims, Error> {
     decode_jwt(token, JWT_VERIFYEMAIL_ISSUER.to_string())
 }

-pub fn decode_admin(token: &str) -> Result<AdminJWTClaims, Error> {
+pub fn decode_admin(token: &str) -> Result<AdminJwtClaims, Error> {
     decode_jwt(token, JWT_ADMIN_ISSUER.to_string())
 }
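All five decode_* helpers funnel into one generic decode_jwt, differing only in the expected issuer and the claims type they deserialize into (the JWTClaims -> JwtClaims renames just follow Rust's upper-camel-case convention for acronyms, as clippy's upper_case_acronyms lint suggests). A sketch of such an issuer-pinned generic decoder, assuming a jsonwebtoken 7-style API; HS256 with a byte secret stands in for the RS256 keypair the real code loads from disk:

use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use serde::de::DeserializeOwned;

fn decode_claims<T: DeserializeOwned>(token: &str, issuer: String, key: &[u8]) -> Result<T, jsonwebtoken::errors::Error> {
    // Pin the expected issuer so a login token cannot be replayed as, say, an admin token.
    let mut validation = Validation::new(Algorithm::HS256);
    validation.iss = Some(issuer);
    decode::<T>(token, &DecodingKey::from_secret(key), &validation).map(|data| data.claims)
}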

 #[derive(Debug, Serialize, Deserialize)]
-pub struct LoginJWTClaims {
+pub struct LoginJwtClaims {
     // Not before
     pub nbf: i64,
     // Expiration time

@@ -108,7 +110,7 @@ pub struct LoginJWTClaims {
 }

 #[derive(Debug, Serialize, Deserialize)]
-pub struct InviteJWTClaims {
+pub struct InviteJwtClaims {
     // Not before
     pub nbf: i64,
     // Expiration time

@@ -130,9 +132,9 @@ pub fn generate_invite_claims(
     org_id: Option<String>,
     user_org_id: Option<String>,
     invited_by_email: Option<String>,
-) -> InviteJWTClaims {
+) -> InviteJwtClaims {
     let time_now = Utc::now().naive_utc();
-    InviteJWTClaims {
+    InviteJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::days(5)).timestamp(),
         iss: JWT_INVITE_ISSUER.to_string(),

@@ -145,7 +147,7 @@ pub fn generate_invite_claims(
 }

 #[derive(Debug, Serialize, Deserialize)]
-pub struct DeleteJWTClaims {
+pub struct DeleteJwtClaims {
     // Not before
     pub nbf: i64,
     // Expiration time

@@ -156,9 +158,9 @@ pub struct DeleteJWTClaims {
     pub sub: String,
 }

-pub fn generate_delete_claims(uuid: String) -> DeleteJWTClaims {
+pub fn generate_delete_claims(uuid: String) -> DeleteJwtClaims {
     let time_now = Utc::now().naive_utc();
-    DeleteJWTClaims {
+    DeleteJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::days(5)).timestamp(),
         iss: JWT_DELETE_ISSUER.to_string(),

@@ -167,7 +169,7 @@ pub fn generate_delete_claims(uuid: String) -> DeleteJWTClaims {
 }

 #[derive(Debug, Serialize, Deserialize)]
-pub struct VerifyEmailJWTClaims {
+pub struct VerifyEmailJwtClaims {
     // Not before
     pub nbf: i64,
     // Expiration time

@@ -178,9 +180,9 @@ pub struct VerifyEmailJWTClaims {
     pub sub: String,
 }

-pub fn generate_verify_email_claims(uuid: String) -> DeleteJWTClaims {
+pub fn generate_verify_email_claims(uuid: String) -> DeleteJwtClaims {
     let time_now = Utc::now().naive_utc();
-    DeleteJWTClaims {
+    DeleteJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::days(5)).timestamp(),
         iss: JWT_VERIFYEMAIL_ISSUER.to_string(),

@@ -189,7 +191,7 @@ pub fn generate_verify_email_claims(uuid: String) -> DeleteJWTClaims {
 }

 #[derive(Debug, Serialize, Deserialize)]
-pub struct AdminJWTClaims {
+pub struct AdminJwtClaims {
     // Not before
     pub nbf: i64,
     // Expiration time

@@ -200,9 +202,9 @@ pub struct AdminJWTClaims {
     pub sub: String,
 }

-pub fn generate_admin_claims() -> AdminJWTClaims {
+pub fn generate_admin_claims() -> AdminJwtClaims {
     let time_now = Utc::now().naive_utc();
-    AdminJWTClaims {
+    AdminJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::minutes(20)).timestamp(),
         iss: JWT_ADMIN_ISSUER.to_string(),

@@ -213,22 +215,22 @@ pub fn generate_admin_claims() -> AdminJWTClaims {
 //
 // Bearer token authentication
 //
-use rocket::request::{self, FromRequest, Request};
-use rocket::Outcome;
+use rocket::request::{FromRequest, Outcome, Request};

-use crate::db::models::{Device, User, UserOrgStatus, UserOrgType, UserOrganization};
-use crate::db::DbConn;
+use crate::db::{
+    models::{CollectionUser, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
+    DbConn,
+};

-pub struct Headers {
-    pub host: String,
-    pub device: Device,
-    pub user: User,
+pub struct Host {
+    pub host: String
 }

-impl<'a, 'r> FromRequest<'a, 'r> for Headers {
+impl<'a, 'r> FromRequest<'a, 'r> for Host {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         let headers = request.headers();

         // Get host

@@ -259,6 +261,28 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
             format!("{}://{}", protocol, host)
         };

+        Outcome::Success(Host { host })
+    }
+}
+
+pub struct Headers {
+    pub host: String,
+    pub device: Device,
+    pub user: User,
+}
+
+impl<'a, 'r> FromRequest<'a, 'r> for Headers {
+    type Error = &'static str;
+
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        let headers = request.headers();
+
+        let host = match Host::from_request(request) {
+            Outcome::Forward(_) => return Outcome::Forward(()),
+            Outcome::Failure(f) => return Outcome::Failure(f),
+            Outcome::Success(host) => host.host,
+        };
+
         // Get access_token
         let access_token: &str = match headers.get_one("Authorization") {
             Some(a) => match a.rsplit("Bearer ").next() {

@@ -293,8 +317,26 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
         };

         if user.security_stamp != claims.sstamp {
+            if let Some(stamp_exception) = user
+                .stamp_exception
+                .as_deref()
+                .and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
+            {
+                let current_route = match request.route().and_then(|r| r.name) {
+                    Some(name) => name,
+                    _ => err_handler!("Error getting current route for stamp exception"),
+                };
+
+                // Check if both match; if not, this route is not allowed with the current security stamp.
+                if stamp_exception.route != current_route {
+                    err_handler!("Invalid security stamp: Current route and exception route do not match")
+                } else if stamp_exception.security_stamp != claims.sstamp {
+                    err_handler!("Invalid security stamp for matched stamp exception")
+                }
+            } else {
                 err_handler!("Invalid security stamp")
+            }
         }

         Outcome::Success(Headers { host, device, user })
     }
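The guard above tolerates a stale security stamp only when the user row carries a serialized UserStampException naming both the route and the stamp it covers; the .as_deref().and_then(|s| serde_json::from_str(..).ok()) chain quietly treats a NULL column or malformed JSON as "no exception". A reduced sketch of that Option-of-JSON-column pattern, mirroring the two fields the diff checks:

use serde::Deserialize;

#[derive(Deserialize)]
struct StampException {
    route: String,
    security_stamp: String,
}

fn parse_exception(column: Option<&str>) -> Option<StampException> {
    // A missing column, or one that fails to parse, both come back as None.
    column.and_then(|s| serde_json::from_str::<StampException>(s).ok())
}

fn main() {
    let ok = parse_exception(Some(r#"{"route":"verify_email","security_stamp":"abc"}"#));
    assert!(ok.is_some());
    assert!(parse_exception(Some("not json")).is_none());
    assert!(parse_exception(None).is_none());
}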

@@ -305,11 +347,13 @@ pub struct OrgHeaders {
     pub device: Device,
     pub user: User,
     pub org_user_type: UserOrgType,
+    pub org_user: UserOrganization,
+    pub org_id: String,
 }

-// org_id is usually the second param ("/organizations/<org_id>")
-// But there are cases where it is located in a query value.
-// First check the param, if this is not a valid uuid, we will try the query value.
+// org_id is usually the second path param ("/organizations/<org_id>"),
+// but there are cases where it is a query value.
+// First check the path, if this is not a valid uuid, try the query values.
 fn get_org_id(request: &Request) -> Option<String> {
     if let Some(Ok(org_id)) = request.get_param::<String>(1) {
         if uuid::Uuid::parse_str(&org_id).is_ok() {

@@ -329,7 +373,7 @@ fn get_org_id(request: &Request) -> Option<String> {
 impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         match request.guard::<Headers>() {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),

@@ -365,8 +409,10 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
                             err_handler!("Unknown user type in the database")
                         }
                     },
+                    org_user,
+                    org_id,
                 })
             }
             _ => err_handler!("Error getting the organization id"),
         }
     }

@@ -384,7 +430,7 @@ pub struct AdminHeaders {
 impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         match request.guard::<OrgHeaders>() {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),

@@ -404,12 +450,133 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
     }
 }

-impl Into<Headers> for AdminHeaders {
-    fn into(self) -> Headers {
+impl From<AdminHeaders> for Headers {
+    fn from(h: AdminHeaders) -> Headers {
         Headers {
-            host: self.host,
-            device: self.device,
-            user: self.user
+            host: h.host,
+            device: h.device,
+            user: h.user,
         }
     }
 }
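Switching from impl Into<Headers> to impl From<AdminHeaders> is the idiomatic direction: the standard library's blanket impl<T, U: From<T>> Into<U> for T derives Into automatically from From, so implementing From gives callers both h.into() and Headers::from(h), while a hand-written Into gives only one. A compact demonstration:

struct Celsius(f64);
struct Fahrenheit(f64);

impl From<Celsius> for Fahrenheit {
    fn from(c: Celsius) -> Fahrenheit {
        Fahrenheit(c.0 * 9.0 / 5.0 + 32.0)
    }
}

fn main() {
    let f: Fahrenheit = Celsius(100.0).into(); // Into comes for free via the blanket impl
    assert_eq!(f.0, 212.0);
    let g = Fahrenheit::from(Celsius(0.0));    // the explicit direction also works
    assert_eq!(g.0, 32.0);
}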

+// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
+// but there could be cases where it is a query value.
+// First check the path, if this is not a valid uuid, try the query values.
+fn get_col_id(request: &Request) -> Option<String> {
+    if let Some(Ok(col_id)) = request.get_param::<String>(3) {
+        if uuid::Uuid::parse_str(&col_id).is_ok() {
+            return Some(col_id);
+        }
+    }
+
+    if let Some(Ok(col_id)) = request.get_query_value::<String>("collectionId") {
+        if uuid::Uuid::parse_str(&col_id).is_ok() {
+            return Some(col_id);
+        }
+    }
+
+    None
+}
+
+/// The ManagerHeaders are used to check if you are at least a Manager
+/// and have access to the specific collection provided via the <col_id>/collections/collectionId.
+/// This does strict checking on the collection_id, ManagerHeadersLoose does not.
+pub struct ManagerHeaders {
+    pub host: String,
+    pub device: Device,
+    pub user: User,
+    pub org_user_type: UserOrgType,
+}
+
+impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
+    type Error = &'static str;
+
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        match request.guard::<OrgHeaders>() {
+            Outcome::Forward(_) => Outcome::Forward(()),
+            Outcome::Failure(f) => Outcome::Failure(f),
+            Outcome::Success(headers) => {
+                if headers.org_user_type >= UserOrgType::Manager {
+                    match get_col_id(request) {
+                        Some(col_id) => {
+                            let conn = match request.guard::<DbConn>() {
+                                Outcome::Success(conn) => conn,
+                                _ => err_handler!("Error getting DB"),
+                            };
+
+                            if !headers.org_user.has_full_access() {
+                                match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) {
+                                    Some(_) => (),
+                                    None => err_handler!("The current user isn't a manager for this collection"),
+                                }
+                            }
+                        }
+                        _ => err_handler!("Error getting the collection id"),
+                    }
+
+                    Outcome::Success(Self {
+                        host: headers.host,
+                        device: headers.device,
+                        user: headers.user,
+                        org_user_type: headers.org_user_type,
+                    })
+                } else {
+                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
+                }
+            }
+        }
+    }
+}
+
+impl From<ManagerHeaders> for Headers {
+    fn from(h: ManagerHeaders) -> Headers {
+        Headers {
+            host: h.host,
+            device: h.device,
+            user: h.user,
+        }
+    }
+}
+
+/// The ManagerHeadersLoose is used when you at least need to be a Manager,
+/// but there is no collection_id sent with the request (either in the path or as form data).
+pub struct ManagerHeadersLoose {
+    pub host: String,
+    pub device: Device,
+    pub user: User,
+    pub org_user_type: UserOrgType,
+}
+
+impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose {
+    type Error = &'static str;
+
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        match request.guard::<OrgHeaders>() {
+            Outcome::Forward(_) => Outcome::Forward(()),
+            Outcome::Failure(f) => Outcome::Failure(f),
+            Outcome::Success(headers) => {
+                if headers.org_user_type >= UserOrgType::Manager {
+                    Outcome::Success(Self {
+                        host: headers.host,
+                        device: headers.device,
+                        user: headers.user,
+                        org_user_type: headers.org_user_type,
+                    })
+                } else {
+                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
+                }
+            }
+        }
+    }
+}
+
+impl From<ManagerHeadersLoose> for Headers {
+    fn from(h: ManagerHeadersLoose) -> Headers {
+        Headers {
+            host: h.host,
+            device: h.device,
+            user: h.user,
+        }
+    }
+}

@@ -423,7 +590,7 @@ pub struct OwnerHeaders {
 impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         match request.guard::<OrgHeaders>() {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),

@@ -454,7 +621,7 @@ pub struct ClientIp {
 impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
     type Error = ();

-    fn from_request(req: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         let ip = if CONFIG._ip_header_enabled() {
             req.headers().get_one(&CONFIG.ip_header()).and_then(|ip| {
                 match ip.find(',') {
Some files were not shown because too many files have changed in this diff.