Compare commits

149 Commits (SHA1 list; the author and date columns were empty in the captured table):

c666497130 2620a1ac8c ffdcafa044 56ffec40f4 96c2416903 340d42a1ca e19420160f
1741316f42 4f08167d6f fef76e2f6f f16d56cb27 120b286f2b 7f437b6947 8d6e62e18b
d0ec410b73 c546a59c38 e5ec245626 6ea95d1ede 88bea44dd8 8ee5d51bd4 c640abbcd7
13598c098f a622b4d2fb 403f35b571 3968bc8016 ff66368cb6 3fb419e704 832f838ddd
18703bf195 ff8e88a5df 72e1946ce5 ee391720aa e3a2dfffab 8bf1278b1b 00ce943ea5
b67eacdfde 0dcea75764 0c5532d8b5 46e0f3c43a 2cd17fe7af f44b2611e6 82fee0ede3
49579e4ce7 9254cf9d9c ff0fee3690 0778bd4bd5 0cd065d354 8615736e84 5772836be5
c380d9c379 cea7a30d82 06cde29419 20f5988174 b491cfe0b0 fc513413ea 3f7e4712cd
c2ef331df9 5fef7983f4 29ed82a359 7d5186e40a 99270612ba c7b5b6ee07 848d17ffb9
47e8aa29e1 f270f2ed65 aba5b234af 9133e2927d 38104ba7cf c42bcae224 764e51bbe9
8e6c6a1dc4 7a9cfc45da 9e24b9065c 1c2b376ca2 746ce2afb4 029008bad5 d3449bfa00
a9a5706764 3ff8014add e60bdc7efe cccd8262fa 68e5d95d25 5f458b288a e9ee8ac2fa
8a4dfc3bbe 4f86517501 7cb19ef767 565439a914 b8010be26b f76b8a32ca 39167d333a
0d63132987 7b5d5d1302 0dc98bda23 f9a062cac8 6ad4ccd901 ee6ceaa923 20b393d354
f707f86c8e daea54b288 1e5306b820 6890c25ea1 48482fece0 1dc1d4df72 2b4dd6f137
cc021a4784 e3c4609c2a 3da44a8d30 34ea10475d 89a68741d6 2421d49d9a ced7f1771a
af2235bf88 305de2e2cd 8756c5c255 27609ac4cc 95d906bdbb 4bb0d7bc05 d9599155ae
1db37bf3d0 d75a80bd2d 244bad3a24 f7056bcaa5 994669fb69 3ab90259f2 155109dea1
b268c3dd1c 4e64dbdde4 a2955daffe d3921b973b cf6ad3cb15 90e0b7fec6 d77333576b
73ff8d79f7 95fc88ae5b 1d0eaac260 3565bfc939 a82c04910f 233f03ca2b 93c881a7a9
0af3956abd 15feff3e79 5c5700caa7 3bddc176d6 9caf4bf383 9b2234fa0e 1f79fdec4e
a56f4c97e4 3a3390963c
@@ -4,6 +4,8 @@ target
 # Data folder
 data
 .env
 .env.template
 .gitattributes
 
+# IDE files
+.vscode
.editorconfig (new file, 23 lines)

@@ -0,0 +1,23 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

[*]
end_of_line = lf
charset = utf-8

[*.{rs,py}]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true

[*.{yml,yaml}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true

[Makefile]
indent_style = tab
.env.template

@@ -1,4 +1,4 @@
-## Bitwarden_RS Configuration File
+## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
 ##
 ## Be aware that most of these settings will be overridden if they were changed
@@ -36,9 +36,9 @@
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
 
-## Client IP Header, used to identify the IP of the client, defaults to "X-Client-IP"
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
 ## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Client-IP
+# IP_HEADER=X-Real-IP
 
 ## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
 # ICON_CACHE_TTL=2592000
@@ -56,6 +56,28 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012
 
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## Job scheduler settings
+##
+## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
+## and are always in terms of UTC time (regardless of your local time zone settings).
+##
+## How often (in ms) the job scheduler thread checks for jobs that need running.
+## Set to 0 to globally disable scheduled jobs.
+# JOB_POLL_INTERVAL_MS=30000
+##
+## Cron schedule of the job that checks for Sends past their deletion date.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# SEND_PURGE_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that checks for trashed items to delete permanently.
+## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
+# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
+
 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
 
@@ -82,9 +104,9 @@
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents bitwarden_rs from automatically enabling it on start.
+## this setting only prevents vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
 
 ## Database connection retries
@@ -170,22 +192,30 @@
 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
 ## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Bitwarden_RS
+# INVITATION_ORG_NAME=Vaultwarden
 
-## Per-organization attachment limit (KB)
-## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
 # ORG_ATTACHMENT_LIMIT=
-## Per-user attachment limit (KB).
-## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
 # USER_ATTACHMENT_LIMIT=
 
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
 ## Controls the PBBKDF password iterations to apply on the server
 ## The change only applies when the password is changed
 # PASSWORD_ITERATIONS=100000
 
-## Whether password hint should be sent into the error response when the client request it
-# SHOW_PASSWORD_HINT=true
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false
 
 ## Domain settings
 ## The domain must match the address from where you access the server
@@ -230,20 +260,21 @@
 ## You can disable this, so that only the current TOTP Code is allowed.
 ## Keep in mind that when a sever drifts out of time, valid codes could be marked as invalid.
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
-# AUTHENTICATOR_DISABLE_TIME_DRIFT = false
+# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
 
-## Rocket specific settings, check Rocket documentation to learn more
-# ROCKET_ENV=staging
-# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
-# ROCKET_PORT=8000
+## Rocket specific settings
+## See https://rocket.rs/v0.4/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
 # ROCKET_WORKERS=10
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
 ## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
-# SMTP_FROM=bitwarden-rs@domain.tld
-# SMTP_FROM_NAME=Bitwarden_RS
+# SMTP_FROM=vaultwarden@domain.tld
+# SMTP_FROM_NAME=Vaultwarden
 # SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
 # SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
 # SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
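The job schedules added above use the six-field, seconds-first cron syntax of the `cron` crate that the comments link to, always evaluated in UTC. As a quick illustration (a minimal sketch assuming the `cron` and `chrono` crates; this is not code from the repository), the default `SEND_PURGE_SCHEDULE` parses like this:

```rust
use std::str::FromStr;

use chrono::Utc;
use cron::Schedule;

fn main() {
    // "0 5 * * * *": second 0, minute 5 of every hour, every day (UTC).
    let schedule = Schedule::from_str("0 5 * * * *").expect("valid cron expression");

    // Print the next three times the Send purge job would run; the scheduler
    // thread polls every JOB_POLL_INTERVAL_MS and fires jobs whose time has passed.
    for next in schedule.upcoming(Utc).take(3) {
        println!("next Send purge: {}", next);
    }
}
```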
.gitattributes (vendored, 2 lines changed)

@@ -1,3 +1,3 @@
 # Ignore vendored scripts in GitHub stats
-src/static/* linguist-vendored
+src/static/scripts/* linguist-vendored
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 14 lines changed)

@@ -1,6 +1,6 @@
 ---
 name: Bug report
-about: Use this ONLY for bugs in bitwarden_rs itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
+about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
 title: ''
 labels: ''
 assignees: ''
@@ -8,11 +8,11 @@ assignees: ''
 ---
 <!--
 # ###
-NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
+NOTE: Please update to the latest version of vaultwarden before reporting an issue!
 This saves you and us a lot of time and troubleshooting.
 See:
-* https://github.com/dani-garcia/bitwarden_rs/issues/1180
-* https://github.com/dani-garcia/bitwarden_rs/wiki/Updating-the-bitwarden-image
+* https://github.com/dani-garcia/vaultwarden/issues/1180
+* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
 # ###
 -->
 
@@ -37,9 +37,9 @@ such as passwords, IP addresses, and DNS names as appropriate.
 -->
 
 <!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
-<!-- This is NOT the version number shown on the web vault, which is versioned separately from bitwarden_rs -->
+<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
 <!-- Remember to check if your issue exists on the latest version first! -->
-* bitwarden_rs version:
+* vaultwarden version:
 
 <!-- How the server was installed: Docker image, OS package, built from source, etc. -->
 * Install method:
@@ -54,7 +54,7 @@ such as passwords, IP addresses, and DNS names as appropriate.
 
 ### Steps to reproduce
 <!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start bitwarden_rs? -->
+and how did you start vaultwarden? -->
 
 ### Expected behaviour
 <!-- Tell us what you expected to happen -->
.github/ISSUE_TEMPLATE/config.yml (vendored, 8 lines changed)

@@ -1,8 +1,8 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Discourse forum for bitwarden_rs
-    url: https://bitwardenrs.discourse.group/
+  - name: Discourse forum for vaultwarden
+    url: https://vaultwarden.discourse.group/
     about: Use this forum to request features or get help with usage/configuration.
-  - name: GitHub Discussions for bitwarden_rs
-    url: https://github.com/dani-garcia/bitwarden_rs/discussions
+  - name: GitHub Discussions for vaultwarden
+    url: https://github.com/dani-garcia/vaultwarden/discussions
     about: An alternative to the Discourse forum, if this is easier for you.
.github/security-contact.gif (vendored, new binary file, 2.3 KiB)
.github/workflows/build.yml (vendored, 101 lines changed)

@@ -2,26 +2,43 @@ name: Build

 on:
   push:
+    # Ignore when there are only changes done too one of these paths
     paths-ignore:
-      - "**.md"
-      - "**.txt"
+      - "*.md"
+      - "*.txt"
+      - ".dockerignore"
+      - ".env.template"
+      - ".gitattributes"
+      - ".gitignore"
       - "azure-pipelines.yml"
       - "docker/**"
      - "hooks/**"
      - "tools/**"
+      - ".github/FUNDING.yml"
+      - ".github/ISSUE_TEMPLATE/**"
+      - ".github/security-contact.gif"
   pull_request:
+    # Ignore when there are only changes done too one of these paths
     paths-ignore:
-      - "**.md"
-      - "**.txt"
+      - "*.md"
+      - "*.txt"
+      - ".dockerignore"
+      - ".env.template"
+      - ".gitattributes"
+      - ".gitignore"
       - "azure-pipelines.yml"
       - "docker/**"
       - "hooks/**"
       - "tools/**"
+      - ".github/FUNDING.yml"
+      - ".github/ISSUE_TEMPLATE/**"
+      - ".github/security-contact.gif"

 jobs:
   build:
+    # Make warnings errors, this is to prevent warnings slipping through.
+    # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
+    env:
+      RUSTFLAGS: "-D warnings"
     strategy:
       fail-fast: false
       matrix:
@@ -34,28 +51,16 @@ jobs:
       include:
         - target-triple: x86_64-unknown-linux-gnu
           host-triple: x86_64-unknown-linux-gnu
-          features: "sqlite,mysql,postgresql"
+          features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features
           channel: nightly
           os: ubuntu-18.04
-          ext:
+          ext: ""
-        # - target-triple: x86_64-unknown-linux-gnu
-        #   host-triple: x86_64-unknown-linux-gnu
-        #   features: "sqlite,mysql,postgresql"
-        #   channel: stable
-        #   os: ubuntu-18.04
-        #   ext:
-        # - target-triple: x86_64-unknown-linux-musl
-        #   host-triple: x86_64-unknown-linux-gnu
-        #   features: "sqlite,postgresql"
-        #   channel: nightly
-        #   os: ubuntu-18.04
-        #   ext:
         # - target-triple: x86_64-unknown-linux-musl
         #   host-triple: x86_64-unknown-linux-gnu
         #   features: "sqlite,postgresql"
         #   channel: stable
         #   os: ubuntu-18.04
-        #   ext:
+        #   ext: ""

     name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
     runs-on: ${{ matrix.os }}
@@ -91,34 +96,66 @@ jobs:
       with:
         profile: minimal
         target: ${{ matrix.target-triple }}
-        components: clippy
+        components: clippy, rustfmt
     # End Uses the rust-toolchain file to determine version

     # Run cargo tests (In release mode to speed up future builds)
-    - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+    # First test all features together, afterwards test them separately.
+    - name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
       uses: actions-rs/cargo@v1
       with:
         command: test
-        args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+        args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
+    # Test single features
+    # 0: sqlite
+    - name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
+      uses: actions-rs/cargo@v1
+      with:
+        command: test
+        args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
+      if: ${{ matrix.features[0] != '' }}
+    # 1: mysql
+    - name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
+      uses: actions-rs/cargo@v1
+      with:
+        command: test
+        args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
+      if: ${{ matrix.features[1] != '' }}
+    # 2: postgresql
+    - name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
+      uses: actions-rs/cargo@v1
+      with:
+        command: test
+        args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
+      if: ${{ matrix.features[2] != '' }}
     # End Run cargo tests

-    # Run cargo clippy (In release mode to speed up future builds)
-    - name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+    # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+    - name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
       uses: actions-rs/cargo@v1
       with:
         command: clippy
-        args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+        args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
     # End Run cargo clippy

+    # Run cargo fmt
+    - name: '`cargo fmt`'
+      uses: actions-rs/cargo@v1
+      with:
+        command: fmt
+        args: --all -- --check
+    # End Run cargo fmt

     # Build the binary
-    - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+    - name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
       uses: actions-rs/cargo@v1
       with:
         command: build
-        args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+        args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
     # End Build the binary

@@ -126,8 +163,8 @@ jobs:
     - name: Upload artifact
       uses: actions/upload-artifact@v2
       with:
-        name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
-        path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
+        name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+        path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
     # End Upload artifact to Github Actions

@@ -138,7 +175,7 @@ jobs:
     # uses: Shopify/upload-to-release@1
     # if: startsWith(github.ref, 'refs/tags/')
     # with:
-    #   name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
-    #   path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
+    #   name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
+    #   path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
     #   repo-token: ${{ secrets.GITHUB_TOKEN }}
     # End Upload to github actions release
.github/workflows/hadolint.yml (vendored, 10 lines changed)

@@ -1,6 +1,10 @@
 name: Hadolint

 on:
   push:
+    # Ignore when there are only changes done too one of these paths
+    paths:
+      - "docker/**"
   pull_request:
+    # Ignore when there are only changes done too one of these paths
+    paths:
@@ -21,14 +25,14 @@ jobs:
     - name: Download hadolint
       shell: bash
       run: |
-        sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
+        sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
         sudo chmod +x /usr/local/bin/hadolint
       env:
-        HADOLINT_VERSION: 1.19.0
+        HADOLINT_VERSION: 2.5.0
     # End Download hadolint

     # Test Dockerfiles
     - name: Run hadolint
       shell: bash
-      run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
+      run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
     # End Test Dockerfiles
.pre-commit-config.yaml (new file, 37 lines)

@@ -0,0 +1,37 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: check-yaml
      - id: check-json
      - id: check-toml
      - id: end-of-file-fixer
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: detect-private-key
  - repo: local
    hooks:
      - id: fmt
        name: fmt
        description: Format files with cargo fmt.
        entry: cargo fmt
        language: system
        types: [rust]
        args: ["--", "--check"]
      - id: cargo-test
        name: cargo test
        description: Test the package for errors.
        entry: cargo test
        language: system
        args: ["--features", "sqlite,mysql,postgresql", "--"]
        types: [rust]
        pass_filenames: false
      - id: cargo-clippy
        name: cargo clippy
        description: Lint Rust sources
        entry: cargo clippy
        language: system
        args: ["--features", "sqlite,mysql,postgresql", "--", "-D", "warnings"]
        types: [rust]
        pass_filenames: false
Cargo.lock (generated, 948 lines changed; contents collapsed)
Cargo.toml (61 lines changed)

@@ -1,10 +1,10 @@
 [package]
-name = "bitwarden_rs"
+name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2018"

-repository = "https://github.com/dani-garcia/bitwarden_rs"
+repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
 license = "GPL-3.0-only"
 publish = false
@@ -28,17 +28,23 @@ syslog = "4.0.1"

 [dependencies]
 # Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
-rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
-rocket_contrib = "0.5.0-dev"
+rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false }
+rocket_contrib = "=0.5.0-dev"

 # HTTP client
-reqwest = { version = "0.11.2", features = ["blocking", "json"] }
+reqwest = { version = "0.11.4", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies"] }
+
+# Used for custom short lived cookie jar
+cookie = "0.15.1"
+cookie_store = "0.15.0"
+bytes = "1.0.1"
+url = "2.2.2"

 # multipart/form-data support
-multipart = { version = "0.17.1", features = ["server"], default-features = false }
+multipart = { version = "0.18.0", features = ["server"], default-features = false }

 # WebSockets library
-ws = { version = "0.10.0", package = "parity-ws" }
+ws = { version = "0.11.0", package = "parity-ws" }

 # MessagePack library
 rmpv = "0.4.7"
@@ -47,7 +53,7 @@ rmpv = "0.4.7"
 chashmap = "2.2.2"

 # A generic serialization/deserialization framework
-serde = { version = "1.0.125", features = ["derive"] }
+serde = { version = "1.0.126", features = ["derive"] }
 serde_json = "1.0.64"

 # Logging
@@ -55,14 +61,14 @@ log = "0.4.14"
 fern = { version = "0.6.0", features = ["syslog-4"] }

 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
+diesel = { version = "1.4.7", features = [ "chrono", "r2d2"] }
 diesel_migrations = "1.4.0"

 # Bundled SQLite
-libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }

 # Crypto-related libraries
-rand = "0.8.3"
+rand = "0.8.4"
 ring = "0.16.20"

 # UUID generation
@@ -71,7 +77,10 @@ uuid = { version = "0.8.2", features = ["v4"] }
 # Date and time libraries
 chrono = { version = "0.4.19", features = ["serde"] }
 chrono-tz = "0.5.3"
-time = "0.2.26"
+time = "0.2.27"
+
+# Job scheduler
+job_scheduler = "1.2.1"

 # TOTP library
 oath = "0.10.2"
@@ -84,6 +93,7 @@ jsonwebtoken = "7.2.0"

 # U2F library
 u2f = "0.2.0"
+webauthn-rs = "=0.3.0-alpha.9"

 # Yubico Library
 yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
@@ -92,38 +102,38 @@ yubico = { version = "0.10.0", features = ["online-tokio"], default-features = f

 dotenv = { version = "0.15.0", default-features = false }

 # Lazy initialization
-once_cell = "1.7.2"
+once_cell = "1.8.0"

 # Numerical libraries
 num-traits = "0.2.14"
 num-derive = "0.3.3"

 # Email libraries
-lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
-newline-converter = "0.2.0"
+tracing = { version = "0.1.26", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
+lettre = { version = "0.10.0-rc.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }

 # Template library
-handlebars = { version = "3.5.3", features = ["dir_source"] }
+handlebars = { version = "4.1.0", features = ["dir_source"] }

 # For favicon extraction from main website
 html5ever = "0.25.1"
 markup5ever_rcdom = "0.1.0"
-regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
+regex = { version = "1.5.4", features = ["std", "perf"], default-features = false }
 data-url = "0.1.0"

 # Used by U2F, JWT and Postgres
-openssl = "0.10.33"
+openssl = "0.10.35"

 # URL encoding library
 percent-encoding = "2.1.0"
 # Punycode conversion
-idna = "0.2.2"
+idna = "0.2.3"

 # CLI argument parsing
-pico-args = "0.4.0"
+pico-args = "0.4.2"

 # Logging panics to logfile instead stderr only
-backtrace = "0.3.56"
+backtrace = "0.3.60"

 # Macro ident concatenation
 paste = "1.0.5"
@@ -134,4 +144,11 @@ rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429d
 rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }

 # For favicon extraction from main website
-data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
+data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = 'eb7330b5296c0d43816d1346211b74182bb4ae37' }
+
+# The maintainer of the `job_scheduler` crate doesn't seem to have responded
+# to any issues or PRs for almost a year (as of April 2021). This hopefully
+# temporary fork updates Cargo.toml to use more up-to-date dependencies.
+# In particular, `cron` has since implemented parsing of some common syntax
+# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
+job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
README.md (65 lines changed)

@@ -1,15 +1,16 @@
-### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
+### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
+
+📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.

 ---

-[](https://travis-ci.org/dani-garcia/bitwarden_rs)
-[](https://hub.docker.com/r/bitwardenrs/server)
-[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
-[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
-[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
-[](https://matrix.to/#/#bitwarden_rs:matrix.org)
+[](https://hub.docker.com/r/vaultwarden/server)
+[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
+[](https://github.com/dani-garcia/vaultwarden/releases/latest)
+[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
+[](https://matrix.to/#/#vaultwarden:matrix.org)

-Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
+Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).

 **This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**

@@ -33,29 +34,57 @@ Basically full implementation of Bitwarden API is provided including:
 Pull the docker image and mount a volume from the host for persistent storage:

 ```sh
-docker pull bitwardenrs/server:latest
-docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
+docker pull vaultwarden/server:latest
+docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
 ```
-This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.
+This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.

 **IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.

-This can be configured in [bitwarden_rs directly](https://github.com/dani-garcia/bitwarden_rs/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/bitwarden_rs/wiki/Proxy-examples)).
+This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).

 If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).

 ## Usage
-See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
+See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

 ## Get in touch
-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://bitwardenrs.discourse.group/).
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).

-If you spot any bugs or crashes with bitwarden_rs itself, please [create an issue](https://github.com/dani-garcia/bitwarden_rs/issues/). Make sure there aren't any similar issues open, though!
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!

-If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
+If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!

 ### Sponsors
 Thanks for your contribution to the project!

-- [@ChonoN](https://github.com/ChonoN)
-- [@themightychris](https://github.com/themightychris)
+<table>
+  <tr>
+    <td align="center">
+      <a href="https://github.com/netdadaltd">
+        <img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
+        <br />
+        <sub><b>netDada Ltd.</b></sub>
+      </a>
+    </td>
+  </tr>
+</table>
+
+<br/>
+
+<table>
+  <tr>
+    <td align="center">
+      <a href="https://github.com/Gyarbij" style="width: 75px">
+        <sub><b>Chono N</b></sub>
+      </a>
+    </td>
+  </tr>
+  <tr>
+    <td align="center">
+      <a href="https://github.com/themightychris">
+        <sub><b>Chris Alfano</b></sub>
+      </a>
+    </td>
+  </tr>
+</table>
SECURITY.md (new file, 45 lines)

@@ -0,0 +1,45 @@
Vaultwarden tries to prevent security issues but there could always slip something through.
If you believe you've found a security issue in our application, we encourage you to
notify us. We welcome working with you to resolve the issue promptly. Thanks in advance!

# Disclosure Policy

- Let us know as soon as possible upon discovery of a potential security issue, and we'll make every
  effort to quickly resolve the issue.
- Provide us a reasonable amount of time to resolve the issue before any disclosure to the public or a
  third-party. We may publicly disclose the issue before resolving it, if appropriate.
- Make a good faith effort to avoid privacy violations, destruction of data, and interruption or
  degradation of our service. Only interact with accounts you own or with explicit permission of the
  account holder.

# In-scope

- Security issues in any current release of Vaultwarden. Source code is available at https://github.com/dani-garcia/vaultwarden. This includes the current `latest` release and `main / testing` release.

# Exclusions

The following bug classes are out-of scope:

- Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues)
- Bugs that are not part of Vaultwarden, like on the the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
- Issues in an upstream software dependency (ex: Rust, or External Libraries) which are already reported to the upstream maintainer
- Attacks requiring physical access to a user's device
- Issues related to software or protocols not under Vaultwarden's control
- Vulnerabilities in outdated versions of Vaultwarden
- Missing security best practices that do not directly lead to a vulnerability (You may still report them as a normal issue)
- Issues that do not have any impact on the general public

While researching, we'd like to ask you to refrain from:

- Denial of service
- Spamming
- Social engineering (including phishing) of Vaultwarden developers, contributors or users

Thank you for helping keep Vaultwarden and our users safe!

# How to contact us

- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
- You can send an ![security contact](.github/security-contact.gif) to report a security issue.
- If you want to send an encrypted email you can use the following GPG key:<br>
  https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index
@@ -1,22 +0,0 @@ (file deleted, 22 lines)
pool:
  vmImage: 'Ubuntu-18.04'

steps:
- script: |
    ls -la
    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain) --profile=minimal
    echo "##vso[task.prependpath]$HOME/.cargo/bin"
  displayName: 'Install Rust'

- script: |
    sudo apt-get update
    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
  displayName: 'Install build libraries.'

- script: |
    rustc -Vv
    cargo -V
  displayName: Query rust and cargo versions

- script : cargo test --features "sqlite,mysql,postgresql"
  displayName: 'Test project with sqlite, mysql and postgresql backends'
build.rs (14 lines changed)

@@ -1,7 +1,7 @@
-use std::process::Command;
 use std::env;
+use std::process::Command;

 fn main() {
     // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
     #[cfg(feature = "sqlite")]
     println!("cargo:rustc-cfg=sqlite");
@@ -11,8 +11,10 @@ fn main() {
     println!("cargo:rustc-cfg=postgresql");

     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
-    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
+    compile_error!(
+        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
+    );

     if let Ok(version) = env::var("BWRS_VERSION") {
         println!("cargo:rustc-env=BWRS_VERSION={}", version);
         println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
@@ -56,12 +58,12 @@ fn read_git_info() -> Result<(), std::io::Error> {
     // Combined version
     let version = if let Some(exact) = exact_tag {
         exact
-    } else if &branch != "master" {
+    } else if &branch != "main" && &branch != "master" {
         format!("{}-{} ({})", last_tag, rev_short, branch)
     } else {
         format!("{}-{}", last_tag, rev_short)
     };

     println!("cargo:rustc-env=BWRS_VERSION={}", version);
     println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
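The build script above relies on two Cargo mechanisms: `cargo:rustc-cfg=sqlite` enables the short `#[cfg(sqlite)]` alias mentioned in the comment, and `cargo:rustc-env=BWRS_VERSION=...` exposes the computed version string at compile time. A minimal sketch of how both are typically consumed (illustrative only, not code from the repository):

```rust
// build.rs printed `cargo:rustc-cfg=sqlite` when the "sqlite" feature is on,
// so this short alias is equivalent to #[cfg(feature = "sqlite")].
#[cfg(sqlite)]
fn backend() -> &'static str {
    "sqlite"
}

#[cfg(not(sqlite))]
fn backend() -> &'static str {
    "mysql, postgresql, or none"
}

fn main() {
    // build.rs printed `cargo:rustc-env=BWRS_VERSION=...`; option_env! reads it
    // at compile time and degrades gracefully when the variable was not set.
    let version = option_env!("BWRS_VERSION").unwrap_or("unknown");
    println!("vaultwarden {} ({})", version, backend());
}
```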
@@ -1,7 +1,7 @@
 # The cross-built images have the build arch (`amd64`) embedded in the image
 # manifest, rather than the target arch. For example:
 #
-# $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
+# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
 # amd64
 #
 # Recent versions of Docker have started printing a warning when the image's
@@ -1,15 +1,15 @@
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

-{% set build_stage_base_image = "rust:1.50" %}
+{% set build_stage_base_image = "rust:1.53" %}
 {% if "alpine" in target_file %}
 {% if "amd64" in target_file %}
-{% set build_stage_base_image = "clux/muslrust:nightly-2021-02-22" %}
-{% set runtime_stage_base_image = "alpine:3.13" %}
+{% set build_stage_base_image = "clux/muslrust:nightly-2021-06-24" %}
+{% set runtime_stage_base_image = "alpine:3.14" %}
 {% set package_arch_target = "x86_64-unknown-linux-musl" %}
 {% elif "armv7" in target_file %}
 {% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.14" %}
 {% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
 {% endif %}
 {% elif "amd64" in target_file %}
@@ -44,26 +44,26 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "2.19.0" %}
-{% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %}
+{% set vault_version = "2.21.1" %}
+{% set vault_image_digest = "sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v{{ vault_version }}
-#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" bitwardenrs/web-vault:v{{ vault_version }}
-#     [bitwardenrs/web-vault@{{ vault_image_digest }}]
+#     $ docker pull vaultwarden/web-vault:v{{ vault_version }}
+#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
+#     [vaultwarden/web-vault@{{ vault_image_digest }}]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" bitwardenrs/web-vault@{{ vault_image_digest }}
-#     [bitwardenrs/web-vault:v{{ vault_version }}]
+#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
+#     [vaultwarden/web-vault:v{{ vault_version }}]
 #
-FROM bitwardenrs/web-vault@{{ vault_image_digest }} as vault
+FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault

 ########################## BUILD IMAGE ##########################
 FROM {{ build_stage_base_image }} as build
@@ -75,7 +75,8 @@ ARG DB=sqlite,postgresql
 {% set features = "sqlite,postgresql" %}
 {% else %}
 # Alpine-based ARM (musl) only supports sqlite during compile time.
-ARG DB=sqlite
+# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
+ARG DB=sqlite,vendored_openssl
 {% set features = "sqlite" %}
 {% endif %}
 {% else %}
@@ -110,11 +111,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
         libpq5{{ package_arch_prefix }} \
         libpq-dev \
         libmariadb-dev{{ package_arch_prefix }} \
-        libmariadb-dev-compat{{ package_arch_prefix }}
-
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
+        libmariadb-dev-compat{{ package_arch_prefix }} \
         gcc-{{ package_cross_compiler }} \
     && mkdir -p ~/.cargo \
     && echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
@@ -150,16 +147,15 @@ COPY ./build.rs ./build.rs
 # We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
 # We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
 # What we can do is a force install, because nothing important is overlapping each other.
-RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
-    apt-get download libmariadb-dev-compat:amd64 && \
-    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
-    rm -rvf ./libmariadb-dev-compat*.deb
-
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
+    && apt-get download libmariadb-dev-compat:amd64 \
+    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
+    && rm -rvf ./libmariadb-dev-compat*.deb \
+    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+    # The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+    # Without this specific file the ld command will fail and compilation fails with it.
+    && ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so

 ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
 ENV CROSS_COMPILE="1"
@@ -174,8 +170,8 @@ RUN rustup target add {{ package_arch_target }}
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }} \
+    && find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -189,7 +185,8 @@ RUN touch src/main.rs
 RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
 {% if "alpine" in target_file %}
 {% if "armv7" in target_file %}
-RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
+# hadolint ignore=DL3059
+RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
 {% endif %}
 {% endif %}

@@ -206,18 +203,17 @@ ENV SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}

 {% if "amd64" not in target_file %}
+# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
-
 {% endif %}
-# Install needed libraries
+
+# Create data folder and Install needed libraries
+RUN mkdir /data \
 {% if "alpine" in runtime_stage_base_image %}
-RUN apk add --no-cache \
+    && apk add --no-cache \
         openssl \
         curl \
         dumb-init \
 {% if "sqlite" in features %}
         sqlite \
 {% endif %}
 {% if "mysql" in features %}
         mariadb-connector-c \
 {% endif %}
@@ -226,36 +222,35 @@ RUN apk add --no-cache \
 {% endif %}
         ca-certificates
 {% else %}
-RUN apt-get update && apt-get install -y \
+    && apt-get update && apt-get install -y \
         --no-install-recommends \
         openssl \
         ca-certificates \
         curl \
         dumb-init \
         sqlite3 \
         libmariadb-dev-compat \
         libpq5 \
     && rm -rf /var/lib/apt/lists/*
 {% endif %}

-RUN mkdir /data
 {% if "amd64" not in target_file %}
-
+# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
-
 {% endif %}

 VOLUME /data
 EXPOSE 80
 EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 {% if package_arch_target is defined %}
-COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
+COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
 {% else %}
-COPY --from=build /app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/vaultwarden .
 {% endif %}

 COPY docker/healthcheck.sh /healthcheck.sh
@@ -264,6 +259,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 # Configures the startup!
-WORKDIR /
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -11,21 +11,21 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     $ docker pull vaultwarden/web-vault:v2.21.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-#     [bitwardenrs/web-vault:v2.19.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+#     [vaultwarden/web-vault:v2.21.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.50 as build
+FROM rust:1.53 as build

 # Debian-based builds support multidb
 ARG DB=sqlite,mysql,postgresql
@@ -56,8 +56,8 @@ COPY ./build.rs ./build.rs
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release \
+    && find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -79,28 +79,30 @@ ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10

-# Install needed libraries
-RUN apt-get update && apt-get install -y \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apt-get update && apt-get install -y \
         --no-install-recommends \
         openssl \
         ca-certificates \
         curl \
         dumb-init \
         sqlite3 \
         libmariadb-dev-compat \
         libpq5 \
     && rm -rf /var/lib/apt/lists/*

-RUN mkdir /data
-
 VOLUME /data
 EXPOSE 80
 EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/vaultwarden .

 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -108,6 +110,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 # Configures the startup!
-WORKDIR /
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -11,21 +11,21 @@
 # be changed to point to a malicious image.
 #
 # To verify the current digest for a given tag name:
-# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     $ docker pull vaultwarden/web-vault:v2.21.1
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
+#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-#     [bitwardenrs/web-vault:v2.19.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
+#     [vaultwarden/web-vault:v2.21.1]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

 ########################## BUILD IMAGE ##########################
-FROM clux/muslrust:nightly-2021-02-22 as build
+FROM clux/muslrust:nightly-2021-06-24 as build

 # Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
 ARG DB=sqlite,postgresql
@@ -53,8 +53,8 @@ RUN rustup target add x86_64-unknown-linux-musl
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
-RUN find . -not -path "./target*" -delete
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
+    && find . -not -path "./target*" -delete

 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
@@ -70,32 +70,34 @@ RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.13
+FROM alpine:3.14

 ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10
 ENV SSL_CERT_DIR=/etc/ssl/certs

-# Install needed libraries
-RUN apk add --no-cache \
+# Create data folder and Install needed libraries
+RUN mkdir /data \
+    && apk add --no-cache \
         openssl \
         curl \
         dumb-init \
         sqlite \
         postgresql-libs \
         ca-certificates

-RUN mkdir /data
-
 VOLUME /data
 EXPOSE 80
 EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
+COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -103,6 +105,5 @@ COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

 # Configures the startup!
-WORKDIR /
 ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
@@ -11,21 +11,21 @@
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#     $ docker pull vaultwarden/web-vault:v2.21.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
#     [vaultwarden/web-vault:v2.21.1]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.50 as build
FROM rust:1.53 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -49,11 +49,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        libpq5:arm64 \
        libpq-dev \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev-compat:arm64 \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
@@ -77,16 +73,15 @@ COPY ./build.rs ./build.rs
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
@@ -97,8 +92,8 @@ RUN rustup target add aarch64-unknown-linux-gnu
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
RUN find . -not -path "./target*" -delete
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -120,22 +115,22 @@ ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        sqlite3 \
        libmariadb-dev-compat \
        libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -144,9 +139,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -154,6 +150,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -11,21 +11,21 @@
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#     $ docker pull vaultwarden/web-vault:v2.21.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
#     [vaultwarden/web-vault:v2.21.1]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.50 as build
FROM rust:1.53 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -49,11 +49,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        libpq5:armel \
        libpq-dev \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
@@ -77,16 +73,15 @@ COPY ./build.rs ./build.rs
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
@@ -97,8 +92,8 @@ RUN rustup target add arm-unknown-linux-gnueabi
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
RUN find . -not -path "./target*" -delete
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -120,22 +115,22 @@ ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        sqlite3 \
        libmariadb-dev-compat \
        libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -144,9 +139,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -154,6 +150,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -11,21 +11,21 @@
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#     $ docker pull vaultwarden/web-vault:v2.21.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
#     [vaultwarden/web-vault:v2.21.1]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.50 as build
FROM rust:1.53 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -49,11 +49,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        libpq5:armhf \
        libpq-dev \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
@@ -77,16 +73,15 @@ COPY ./build.rs ./build.rs
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
@@ -97,8 +92,8 @@ RUN rustup target add armv7-unknown-linux-gnueabihf
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN find . -not -path "./target*" -delete
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -120,22 +115,22 @@ ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        sqlite3 \
        libmariadb-dev-compat \
        libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -144,9 +139,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -154,6 +150,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -11,24 +11,25 @@
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#     $ docker pull vaultwarden/web-vault:v2.21.1
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
#     [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
#     [vaultwarden/web-vault:v2.21.1]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault

########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build

# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite
# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
ARG DB=sqlite,vendored_openssl

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -54,8 +55,8 @@ RUN rustup target add armv7-unknown-linux-musleabihf
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN find . -not -path "./target*" -delete
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
@@ -67,30 +68,31 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.13
FROM balenalib/armv7hf-alpine:3.14

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Install needed libraries
RUN apk add --no-cache \
# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        curl \
        dumb-init \
        sqlite \
        ca-certificates

RUN mkdir /data

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -99,9 +101,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -109,6 +112,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,10 +1,20 @@
#!/bin/sh

if [ -r /etc/bitwarden_rs.sh ]; then
if [ -r /etc/vaultwarden.sh ]; then
    . /etc/vaultwarden.sh
elif [ -r /etc/bitwarden_rs.sh ]; then
    echo "### You are using the old /etc/bitwarden_rs.sh script, please migrate to /etc/vaultwarden.sh ###"
    . /etc/bitwarden_rs.sh
fi

if [ -d /etc/bitwarden_rs.d ]; then
if [ -d /etc/vaultwarden.d ]; then
    for f in /etc/vaultwarden.d/*.sh; do
        if [ -r $f ]; then
            . $f
        fi
    done
elif [ -d /etc/bitwarden_rs.d ]; then
    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
@@ -12,4 +22,4 @@ if [ -d /etc/bitwarden_rs.d ]; then
    done
fi

exec /bitwarden_rs "${@}"
exec /vaultwarden "${@}"
@@ -10,7 +10,7 @@ Docker Hub hooks provide these predefined [environment variables](https://docs.d
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

@@ -22,7 +22,7 @@ fi
LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/bitwarden_rs/wiki"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="GPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
@@ -103,7 +103,7 @@ docker buildx build \
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing bitwarden_rs tags, which adhere to the naming conventions of the
# existing vaultwarden tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
migrations/mysql/2021-04-30-233251_add_reprompt/up.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN reprompt INTEGER;

migrations/mysql/2021-05-11-205202_add_hide_email/up.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
ALTER TABLE sends
ADD COLUMN hide_email BOOLEAN;

@@ -0,0 +1,5 @@
ALTER TABLE organizations
ADD COLUMN private_key TEXT;

ALTER TABLE organizations
ADD COLUMN public_key TEXT;

@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN reprompt INTEGER;

@@ -0,0 +1,2 @@
ALTER TABLE sends
ADD COLUMN hide_email BOOLEAN;

@@ -0,0 +1,5 @@
ALTER TABLE organizations
ADD COLUMN private_key TEXT;

ALTER TABLE organizations
ADD COLUMN public_key TEXT;

migrations/sqlite/2021-04-30-233251_add_reprompt/up.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN reprompt INTEGER;

@@ -0,0 +1,2 @@
ALTER TABLE sends
ADD COLUMN hide_email BOOLEAN;

@@ -0,0 +1,5 @@
ALTER TABLE organizations
ADD COLUMN private_key TEXT;

ALTER TABLE organizations
ADD COLUMN public_key TEXT;

resources/vaultwarden-icon-white.svg (new file, 99 lines, 8.7 KiB)
resources/vaultwarden-icon.svg (new file, 86 lines, 8.4 KiB)
resources/vaultwarden-logo-white.svg (new file, 271 lines, 17 KiB)
resources/vaultwarden-logo.svg (new file, 151 lines, 12 KiB)
@@ -1 +1 @@
nightly-2021-02-22
nightly-2021-06-24

@@ -1,2 +1,7 @@
version = "Two"
edition = "2018"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
struct_lit_single_line = false
overflow_delimited_expr = true
src/api/admin.rs (193 changed lines)
@@ -1,11 +1,10 @@
use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::{env, process::Command, time::Duration};
use std::{env, time::Duration};

use reqwest::{blocking::Client, header::USER_AGENT};
use rocket::{
    http::{Cookie, Cookies, SameSite},
    http::{Cookie, Cookies, SameSite, Status},
    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
    response::{content::Html, Flash, Redirect},
    Route,
@@ -13,13 +12,13 @@ use rocket::{
use rocket_contrib::json::Json;

use crate::{
    api::{ApiResult, EmptyResult, NumberOrString},
    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
    error::{Error, MapResult},
    mail,
    util::{format_naive_datetime_local, get_display_size, is_running_in_docker},
    util::{format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker},
    CONFIG,
};

@@ -31,6 +30,7 @@ pub fn routes() -> Vec<Route> {
    routes![
        admin_login,
        get_users_json,
        get_user_json,
        post_admin_login,
        admin_page,
        invite_user,
@@ -64,12 +64,8 @@ static DB_TYPE: Lazy<&str> = Lazy::new(|| {
        .unwrap_or("Unknown")
});

static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| t == DbConnType::sqlite)
        .unwrap_or(false)
        && Command::new("sqlite3").arg("-version").status().is_ok()
});
static CAN_BACKUP: Lazy<bool> =
    Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));

#[get("/")]
fn admin_disabled() -> &'static str {
@@ -142,7 +138,12 @@ fn admin_url(referer: Referer) -> String {
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg, "urlpath": CONFIG.domain_path()});
    let json = json!({
        "page_content": "admin/login",
        "version": VERSION,
        "error": msg,
        "urlpath": CONFIG.domain_path()
    });

    // Return the page
    let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -166,10 +167,7 @@ fn post_admin_login(
    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(
            Redirect::to(admin_url(referer)),
            "Invalid admin token, please try again.",
        ))
        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
    } else {
        // If the token received is valid, generate JWT and save it as a cookie
        let claims = generate_admin_claims();
@@ -198,9 +196,7 @@ fn _validate_token(token: &str) -> bool {
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    users: Option<Vec<Value>>,
    organizations: Option<Vec<Value>>,
    diagnostics: Option<Value>,
    page_data: Option<Value>,
    config: Value,
    can_backup: bool,
    logged_in: bool,
@@ -216,51 +212,19 @@ impl AdminTemplateData {
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            organizations: None,
            diagnostics: None,
            page_data: None,
        }
    }

    fn users(users: Vec<Value>) -> Self {
    fn with_data(page_content: &str, page_data: Value) -> Self {
        Self {
            page_content: String::from("admin/users"),
            page_content: String::from(page_content),
            version: VERSION,
            users: Some(users),
            page_data: Some(page_data),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            organizations: None,
            diagnostics: None,
        }
    }

    fn organizations(organizations: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/organizations"),
            version: VERSION,
            organizations: Some(organizations),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            diagnostics: None,
        }
    }

    fn diagnostics(diagnostics: Value) -> Self {
        Self {
            page_content: String::from("admin/diagnostics"),
            version: VERSION,
            organizations: None,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            diagnostics: Some(diagnostics),
        }
    }

@@ -281,23 +245,39 @@ struct InviteData {
    email: String,
}

fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
    if let Some(user) = User::find_by_uuid(uuid, conn) {
        Ok(user)
    } else {
        err_code!("User doesn't exist", Status::NotFound.code);
    }
}

#[post("/invite", data = "<data>")]
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
    let data: InviteData = data.into_inner();
    let email = data.email.clone();
    if User::find_by_mail(&data.email, &conn).is_some() {
        err!("User already exists")
        err_code!("User already exists", Status::Conflict.code)
    }

    let mut user = User::new(email);
    user.save(&conn)?;

    if CONFIG.mail_enabled() {
        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)
    } else {
        let invitation = Invitation::new(data.email);
        invitation.save(&conn)
    }
    // TODO: After try_blocks is stabilized, this can be made more readable
    // See: https://github.com/rust-lang/rust/issues/31436
    (|| {
        if CONFIG.mail_enabled() {
            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?;
        } else {
            let invitation = Invitation::new(data.email);
            invitation.save(&conn)?;
        }

        user.save(&conn)
    })()
    .map_err(|e| e.with_code(Status::InternalServerError.code))?;

    Ok(Json(user.to_json(&conn)))
}

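The closure trick in `invite_user` is easy to miss. Below is a minimal, self-contained sketch of the same pattern with hypothetical names, assuming plain std types rather than the crate's error machinery: an immediately-invoked closure gives `?` a scope of its own, emulating the still-unstable `try` block.

```rust
// Sketch: `(|| { ... })()` runs a closure on the spot, so every `?` inside
// returns from the closure, not the enclosing function. All names here are
// hypothetical and not part of the diff.
fn double_from_str(s: &str) -> Result<i32, String> {
    let doubled: Result<i32, String> = (|| {
        let n: i32 = s.parse().map_err(|_| "not an integer".to_string())?;
        Ok(n * 2)
    })();
    // The whole fallible sequence can now be decorated in one place, the way
    // invite_user maps every failure to a single HTTP status code.
    doubled.map_err(|e| format!("double_from_str failed: {}", e))
}
```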
#[post("/test/smtp", data = "<data>")]
|
||||
@@ -329,7 +309,8 @@ fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
|
||||
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
let users = User::get_all(&conn);
|
||||
let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
|
||||
let users_json: Vec<Value> = users.iter()
|
||||
let users_json: Vec<Value> = users
|
||||
.iter()
|
||||
.map(|u| {
|
||||
let mut usr = u.to_json(&conn);
|
||||
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
|
||||
@@ -339,25 +320,32 @@ fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
|
||||
usr["last_active"] = match u.last_active(&conn) {
|
||||
Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
|
||||
None => json!("Never")
|
||||
None => json!("Never"),
|
||||
};
|
||||
usr
|
||||
})
|
||||
.collect();
|
||||
|
||||
let text = AdminTemplateData::users(users_json).render()?;
|
||||
let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[get("/users/<uuid>")]
|
||||
fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
|
||||
let user = get_user_or_404(&uuid, &conn)?;
|
||||
|
||||
Ok(Json(user.to_json(&conn)))
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/delete")]
|
||||
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||
let user = get_user_or_404(&uuid, &conn)?;
|
||||
user.delete(&conn)
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/deauth")]
|
||||
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||
user.reset_security_stamp();
|
||||
|
||||
@@ -366,7 +354,7 @@ fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
|
||||
#[post("/users/<uuid>/disable")]
|
||||
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||
user.reset_security_stamp();
|
||||
user.enabled = false;
|
||||
@@ -376,7 +364,7 @@ fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
|
||||
#[post("/users/<uuid>/enable")]
|
||||
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
user.enabled = true;
|
||||
|
||||
user.save(&conn)
|
||||
@@ -384,7 +372,7 @@ fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
|
||||
#[post("/users/<uuid>/remove-2fa")]
|
||||
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
|
||||
user.totp_recover = None;
|
||||
user.save(&conn)
|
||||
@@ -424,7 +412,6 @@ fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: D
|
||||
user_to_edit.save(&conn)
|
||||
}
|
||||
|
||||
|
||||
#[post("/users/update_revision")]
|
||||
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
User::update_all_revisions(&conn)
|
||||
@@ -433,7 +420,8 @@ fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
#[get("/organizations/overview")]
|
||||
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
let organizations = Organization::get_all(&conn);
|
||||
let organizations_json: Vec<Value> = organizations.iter()
|
||||
let organizations_json: Vec<Value> = organizations
|
||||
.iter()
|
||||
.map(|o| {
|
||||
let mut org = o.to_json();
|
||||
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
|
||||
@@ -444,7 +432,7 @@ fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<St
|
||||
})
|
||||
.collect();
|
||||
|
||||
let text = AdminTemplateData::organizations(organizations_json).render()?;
|
||||
let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
@@ -470,26 +458,15 @@ struct GitCommit {
|
||||
}
|
||||
|
||||
fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||
let github_api = Client::builder().build()?;
|
||||
let github_api = get_reqwest_client();
|
||||
|
||||
Ok(github_api
|
||||
.get(url)
|
||||
.timeout(Duration::from_secs(10))
|
||||
.header(USER_AGENT, "Bitwarden_RS")
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json::<T>()?)
|
||||
Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
|
||||
}
|
||||
|
||||
fn has_http_access() -> bool {
|
||||
let http_access = Client::builder().build().unwrap();
|
||||
let http_access = get_reqwest_client();
|
||||
|
||||
match http_access
|
||||
.head("https://github.com/dani-garcia/bitwarden_rs")
|
||||
.timeout(Duration::from_secs(10))
|
||||
.header(USER_AGENT, "Bitwarden_RS")
|
||||
.send()
|
||||
{
|
||||
match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
|
||||
Ok(r) => r.status().is_success(),
|
||||
_ => false,
|
||||
}
|
||||
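Both call sites now share one helper instead of building a `Client` and setting `USER_AGENT` by hand. A plausible shape for `get_reqwest_client()` is sketched below; the real definition lives in `src/util.rs` and may configure more than shown here (timeouts, TLS options), so treat this as an illustration only.

```rust
// Hedged sketch of a shared client helper: one blocking reqwest Client
// pre-configured with a default User-Agent for all outbound requests.
use reqwest::blocking::Client;

pub fn get_reqwest_client() -> Client {
    Client::builder()
        .user_agent("Vaultwarden")
        .build()
        .expect("Failed to build reqwest client")
}
```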
@@ -502,9 +479,16 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
    use std::net::ToSocketAddrs;

    // Get current running versions
    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
    let vault_version_str = read_file_string(&vault_version_path)?;
    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
    let web_vault_version: WebVaultVersion =
        match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
            Ok(s) => serde_json::from_str(&s)?,
            _ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
                Ok(s) => serde_json::from_str(&s)?,
                _ => WebVaultVersion {
                    version: String::from("Version file missing"),
                },
            },
        };

    // Execute some environment checks
    let running_within_docker = is_running_in_docker();
@@ -524,11 +508,11 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
    // TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already.
    let (latest_release, latest_commit, latest_web_build) = if has_http_access {
        (
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
                Ok(r) => r.tag_name,
                _ => "-".to_string(),
            },
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
                Ok(mut c) => {
                    c.sha.truncate(8);
                    c.sha
@@ -540,7 +524,9 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
            if running_within_docker {
                "-".to_string()
            } else {
                match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
                match get_github_api::<GitRelease>(
                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
                ) {
                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
                    _ => "-".to_string(),
                }
@@ -552,14 +538,15 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu

    let ip_header_name = match &ip_header.0 {
        Some(h) => h,
        _ => ""
        _ => "",
    };

    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "web_vault_version": web_vault_version.version,
        "latest_release": latest_release,
        "latest_commit": latest_commit,
        "web_vault_enabled": &CONFIG.web_vault_enabled(),
        "web_vault_version": web_vault_version.version,
        "latest_web_build": latest_web_build,
        "running_within_docker": running_within_docker,
        "has_http_access": has_http_access,
@@ -571,10 +558,12 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
        "db_type": *DB_TYPE,
        "db_version": get_sql_server_version(&conn),
        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
        "overrides": &CONFIG.get_overrides().join(", "),
        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
    });

    let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
    let text = AdminTemplateData::with_data("admin/diagnostics", diagnostics_json).render()?;
    Ok(Html(text))
}

@@ -596,11 +585,11 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
}

#[post("/config/backup_db")]
fn backup_db(_token: AdminToken) -> EmptyResult {
fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
    if *CAN_BACKUP {
        backup_database()
        backup_database(&conn)
    } else {
        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
        err!("Can't back up current DB (Only SQLite supports this feature)");
    }
}

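The reworked `backup_db` passes the pooled connection into `backup_database`, which is why `CAN_BACKUP` dropped its `Command::new("sqlite3")` probe: the backup no longer shells out to an external binary. A hedged sketch of what a connection-based SQLite backup can look like, assuming Diesel and SQLite's `VACUUM INTO`; the project's actual `backup_database(&conn)` may differ in detail.

```rust
// Sketch only: SQLite's `VACUUM INTO` writes a consistent copy of the live
// database through the existing connection, no external binary required.
use diesel::{sql_query, sqlite::SqliteConnection, QueryResult, RunQueryDsl};

fn backup_sqlite(conn: &SqliteConnection, dest: &str) -> QueryResult<usize> {
    // NOTE: `dest` is interpolated for illustration only; a real
    // implementation should construct the path itself rather than
    // accept arbitrary input into the SQL string.
    sql_query(format!("VACUUM INTO '{}'", dest)).execute(conn)
}
```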
@@ -95,7 +95,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
        }
        None => {
            // Order is important here; the invitation check must come first
            // because the bitwarden_rs admin can invite anyone, regardless
            // because the vaultwarden admin can invite anyone, regardless
            // of other signup restrictions.
            if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
                User::new(data.Email.clone())
@@ -231,7 +231,10 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
        err!("Invalid password")
    }

    user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
    user.set_password(
        &data.NewMasterPasswordHash,
        Some(vec![String::from("post_rotatekey"), String::from("get_contacts")]),
    );
    user.akey = data.Key;
    user.save(&conn)
}
@@ -320,15 +323,9 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
                err!("The cipher is not owned by the user")
            }

            update_cipher_from_data(
                &mut saved_cipher,
                cipher_data,
                &headers,
                false,
                &conn,
                &nt,
                UpdateType::CipherUpdate,
            )?
            // Prevent triggering cipher updates via WebSockets by setting UpdateType::None
            // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
            update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)?
        }

    // Update user data
@@ -337,7 +334,6 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
    user.akey = data.Key;
    user.private_key = Some(data.PrivateKey);
    user.reset_security_stamp();
    user.reset_stamp_exception();

    user.save(&conn)
}
@@ -584,24 +580,45 @@ struct PasswordHintData {

#[post("/accounts/password-hint", data = "<data>")]
fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
    let data: PasswordHintData = data.into_inner().data;

    let hint = match User::find_by_mail(&data.Email, &conn) {
        Some(user) => user.password_hint,
        None => return Ok(()),
    };

    if CONFIG.mail_enabled() {
        mail::send_password_hint(&data.Email, hint)?;
    } else if CONFIG.show_password_hint() {
        if let Some(hint) = hint {
            err!(format!("Your password hint is: {}", &hint));
        } else {
            err!("Sorry, you have no password hint...");
        }
    if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
        err!("This server is not configured to provide password hints.");
    }

    Ok(())
    const NO_HINT: &str = "Sorry, you have no password hint...";

    let data: PasswordHintData = data.into_inner().data;
    let email = &data.Email;

    match User::find_by_mail(email, &conn) {
        None => {
            // To prevent user enumeration, act as if the user exists.
            if CONFIG.mail_enabled() {
                // There is still a timing side channel here in that the code
                // paths that send mail take noticeably longer than ones that
                // don't. Add a randomized sleep to mitigate this somewhat.
                use rand::{thread_rng, Rng};
                let mut rng = thread_rng();
                let base = 1000;
                let delta: i32 = 100;
                let sleep_ms = (base + rng.gen_range(-delta..=delta)) as u64;
                std::thread::sleep(std::time::Duration::from_millis(sleep_ms));
                Ok(())
            } else {
                err!(NO_HINT);
            }
        }
        Some(user) => {
            let hint: Option<String> = user.password_hint;
            if CONFIG.mail_enabled() {
                mail::send_password_hint(email, hint)?;
                Ok(())
            } else if let Some(hint) = hint {
                err!(format!("Your password hint is: {}", hint));
            } else {
                err!(NO_HINT);
            }
        }
    }
}

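The randomized sleep above narrows, but does not close, the timing gap between the mailing and non-mailing paths. A more general way to express the same idea, as a hypothetical helper that is not part of the diff, is to pad any fallible operation to a minimum wall-clock duration:

```rust
// Hypothetical helper: run `f` and pad the elapsed time up to `min`, so the
// fast (user-not-found) and slow (mail-sending) paths look more alike to an
// outside observer measuring response times.
use std::time::{Duration, Instant};

fn with_min_duration<T>(min: Duration, f: impl FnOnce() -> T) -> T {
    let start = Instant::now();
    let out = f();
    if let Some(remaining) = min.checked_sub(start.elapsed()) {
        std::thread::sleep(remaining);
    }
    out
}
```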
#[derive(Deserialize)]

@@ -1,19 +1,18 @@
use std::collections::{HashMap, HashSet};
use std::path::Path;
use std::path::{Path, PathBuf};

use chrono::{NaiveDateTime, Utc};
use rocket::{http::ContentType, request::Form, Data, Route};
use rocket_contrib::json::Json;
use serde_json::Value;

use data_encoding::HEXLOWER;
use multipart::server::{save::SavedData, Multipart, SaveResult};

use crate::{
    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
    auth::Headers,
    crypto,
    db::{models::*, DbConn},
    db::{models::*, DbConn, DbPool},
    CONFIG,
};

@@ -25,7 +24,7 @@ pub fn routes() -> Vec<Route> {
    // whether the user is an owner/admin of the relevant org, and if so,
    // allows the operation unconditionally.
    //
    // bitwarden_rs factors in the org owner/admin status as part of
    // vaultwarden factors in the org owner/admin status as part of
    // determining the write accessibility of a cipher, so most
    // admin/non-admin implementations can be shared.
    routes![
@@ -39,8 +38,11 @@ pub fn routes() -> Vec<Route> {
        post_ciphers_admin,
        post_ciphers_create,
        post_ciphers_import,
        post_attachment,
        post_attachment_admin,
        get_attachment,
        post_attachment_v2,
        post_attachment_v2_data,
        post_attachment, // legacy
        post_attachment_admin, // legacy
        post_attachment_share,
        delete_attachment_post,
        delete_attachment_post_admin,
@@ -77,6 +79,15 @@ pub fn routes() -> Vec<Route> {
    ]
}

pub fn purge_trashed_ciphers(pool: DbPool) {
    debug!("Purging trashed ciphers");
    if let Ok(conn) = pool.get() {
        Cipher::purge_trash(&conn);
    } else {
        error!("Failed to get DB connection while purging trashed ciphers")
    }
}

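`purge_trashed_ciphers` takes the whole `DbPool` rather than a single connection because it is meant to be driven by a background scheduler, not a request handler. A hypothetical wiring sketch follows; the real scheduling lives elsewhere (e.g. in `main.rs`) and may use a job-scheduler crate instead of a raw thread.

```rust
// Sketch: a detached thread that periodically purges soft-deleted ciphers,
// drawing a fresh connection from the pool on each run. The interval is
// illustrative, not the project's configured value.
use std::{thread, time::Duration};

fn spawn_purge_job(pool: DbPool) {
    thread::spawn(move || loop {
        purge_trashed_ciphers(pool.clone());
        thread::sleep(Duration::from_secs(300)); // e.g. every 5 minutes
    });
}
```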
#[derive(FromForm, Default)]
struct SyncData {
    #[form(field = "excludeDomains")]
@@ -91,24 +102,18 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
    let collections_json: Vec<Value> = collections.iter()
        .map(|c| c.to_json_details(&headers.user.uuid, &conn))
        .collect();
    let collections_json: Vec<Value> =
        collections.iter().map(|c| c.to_json_details(&headers.user.uuid, &conn)).collect();

    let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();
    let ciphers_json: Vec<Value> =
        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();

    let sends = Send::find_by_user(&headers.user.uuid, &conn);
    let sends_json: Vec<Value> = sends
        .iter()
        .map(|s| s.to_json())
        .collect();
    let sends_json: Vec<Value> = sends.iter().map(|s| s.to_json()).collect();

    let domains_json = if data.exclude_domains {
        Value::Null
@@ -124,6 +129,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
        "Ciphers": ciphers_json,
        "Domains": domains_json,
        "Sends": sends_json,
        "unofficialServer": true,
        "Object": "sync"
    }))
}
@@ -132,10 +138,8 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);

    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();
    let ciphers_json: Vec<Value> =
        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();

    Json(json!({
        "Data": ciphers_json,
@@ -197,6 +201,7 @@ pub struct CipherData {
    Identity: Option<Value>,

    Favorite: Option<bool>,
    Reprompt: Option<i32>,

    PasswordHistory: Option<Value>,

@@ -236,7 +241,7 @@ fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn

    // Check if there are one or more collections selected when this cipher is part of an organization.
    // Return an error if this is not the case before creating an empty cipher.
    if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
        err!("You must select at least one collection.");
    }

@@ -263,7 +268,13 @@ fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn
/// Called when creating a new user-owned cipher.
#[post("/ciphers", data = "<data>")]
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let data: CipherData = data.into_inner().data;
    let mut data: CipherData = data.into_inner().data;

    // The web/browser clients set this field to null as expected, but the
    // mobile clients seem to set the invalid value `0001-01-01T00:00:00`,
    // which results in a warning message being logged. This field isn't
    // needed when creating a new cipher, so just ignore it unconditionally.
    data.LastKnownRevisionDate = None;

    let mut cipher = Cipher::new(data.Type, data.Name.clone());
    update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherCreate)?;
@@ -278,17 +289,12 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
/// allowed to delete or share such ciphers to an org, however.
///
/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
fn enforce_personal_ownership_policy(
    data: &CipherData,
    headers: &Headers,
    conn: &DbConn
) -> EmptyResult {
fn enforce_personal_ownership_policy(data: &CipherData, headers: &Headers, conn: &DbConn) -> EmptyResult {
    if data.OrganizationId.is_none() {
        let user_uuid = &headers.user.uuid;
        let policy_type = OrgPolicyType::PersonalOwnership;
        if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
            err!("Due to an Enterprise Policy, you are restricted from \
                 saving items to your personal vault.")
            err!("Due to an Enterprise Policy, you are restricted from saving items to your personal vault.")
        }
    }
    Ok(())
@@ -307,11 +313,12 @@ pub fn update_cipher_from_data(

    // Check that the client isn't updating an existing cipher with stale data.
    if let Some(dt) = data.LastKnownRevisionDate {
        match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
            Err(err) =>
                warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
                err!("The client copy of this cipher is out of date. Resync the client and try again."),
        match NaiveDateTime::parse_from_str(&dt, "%+") {
            // ISO 8601 format
            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
                err!("The client copy of this cipher is out of date. Resync the client and try again.")
            }
            Ok(_) => (),
        }
    }
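The `%+` format specifier used here parses an ISO 8601 timestamp, and the staleness test allows the client copy to lag the server copy by at most one second. A standalone sketch of that comparison (the timestamps are made up):

```rust
use chrono::NaiveDateTime;

fn stale_revision_example() {
    // Server-side cipher timestamp vs. the client's LastKnownRevisionDate.
    let updated_at = NaiveDateTime::parse_from_str("2021-04-05T10:00:02+00:00", "%+").unwrap();
    let client_dt = NaiveDateTime::parse_from_str("2021-04-05T10:00:00+00:00", "%+").unwrap();

    // Two seconds behind the server copy: more than the 1s tolerance,
    // so the update would be rejected as stale.
    assert!(updated_at.signed_duration_since(client_dt).num_seconds() > 1);
}
```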
@@ -321,12 +328,12 @@ pub fn update_cipher_from_data(
    }

    if let Some(org_id) = data.OrganizationId {
        match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn) {
            None => err!("You don't have permission to add item to organization"),
            Some(org_user) => {
                if shared_to_collection
                    || org_user.has_full_access()
                    || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
                    || cipher.is_write_accessible_to_user(&headers.user.uuid, conn)
                {
                    cipher.organization_uuid = Some(org_id);
                    // After some discussion in PR #1329 re-added the user_uuid = None again.
@@ -358,7 +365,7 @@ pub fn update_cipher_from_data(
    // Modify attachments name and keys when rotating
    if let Some(attachments) = data.Attachments2 {
        for (id, attachment) in attachments {
            let mut saved_att = match Attachment::find_by_id(&id, &conn) {
            let mut saved_att = match Attachment::find_by_id(&id, conn) {
                Some(att) => att,
                None => err!("Attachment doesn't exist"),
            };
@@ -373,7 +380,7 @@ pub fn update_cipher_from_data(
            saved_att.akey = Some(attachment.Key);
            saved_att.file_name = attachment.FileName;

            saved_att.save(&conn)?;
            saved_att.save(conn)?;
        }
    }

@@ -384,12 +391,9 @@ pub fn update_cipher_from_data(
// But, we at least know we do not need to store and return this specific key.
fn _clean_cipher_data(mut json_data: Value) -> Value {
    if json_data.is_array() {
        json_data.as_array_mut()
            .unwrap()
            .iter_mut()
            .for_each(|ref mut f| {
                f.as_object_mut().unwrap().remove("Response");
            });
        json_data.as_array_mut().unwrap().iter_mut().for_each(|ref mut f| {
            f.as_object_mut().unwrap().remove("Response");
        });
    };
    json_data
}
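The effect of `_clean_cipher_data()` is easiest to see on sample input: every object in the array keeps its data, and only the client-side `Response` key is dropped. A small illustration (the field contents are invented):

```rust
use serde_json::json;

fn clean_cipher_data_example() {
    let fields = json!([
        { "Name": "PIN", "Value": "1234", "Response": null },
        { "Name": "Port", "Value": "8080", "Response": null }
    ]);

    let cleaned = _clean_cipher_data(fields);
    // Real data survives; the unwanted key is gone from every element.
    assert_eq!(cleaned, json!([
        { "Name": "PIN", "Value": "1234" },
        { "Name": "Port", "Value": "8080" }
    ]));
}
```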
@@ -411,22 +415,23 @@ pub fn update_cipher_from_data(
                data["Uris"] = _clean_cipher_data(data["Uris"].clone());
            }
            data
        },
        }
        None => err!("Data missing"),
    };

    cipher.name = data.Name;
    cipher.notes = data.Notes;
    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string() );
    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
    cipher.data = type_data.to_string();
    cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
    cipher.reprompt = data.Reprompt;

    cipher.save(&conn)?;
    cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
    cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;
    cipher.save(conn)?;
    cipher.move_to_folder(data.FolderId, &headers.user.uuid, conn)?;
    cipher.set_favorite(data.Favorite, &headers.user.uuid, conn)?;

    if ut != UpdateType::None {
        nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
        nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn));
    }

    Ok(())
@@ -592,14 +597,11 @@ fn post_collections_admin(
    }

    let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect();
    let current_collections: HashSet<String> = cipher
        .get_collections(&headers.user.uuid, &conn)
        .iter()
        .cloned()
        .collect();
    let current_collections: HashSet<String> =
        cipher.get_collections(&headers.user.uuid, &conn).iter().cloned().collect();

    for collection in posted_collections.symmetric_difference(&current_collections) {
        match Collection::find_by_uuid(&collection, &conn) {
        match Collection::find_by_uuid(collection, &conn) {
            None => err!("Invalid collection ID provided"),
            Some(collection) => {
                if collection.is_writable_by_user(&headers.user.uuid, &conn) {
@@ -713,9 +715,9 @@ fn share_cipher_by_uuid(
    conn: &DbConn,
    nt: &Notify,
) -> JsonResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
    let mut cipher = match Cipher::find_by_uuid(uuid, conn) {
        Some(cipher) => {
            if cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
            if cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
                cipher
            } else {
                err!("Cipher is not write accessible")
@@ -732,11 +734,11 @@ fn share_cipher_by_uuid(
        None => {}
        Some(organization_uuid) => {
            for uuid in &data.CollectionIds {
                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, conn) {
                    None => err!("Invalid collection ID provided"),
                    Some(collection) => {
                        if collection.is_writable_by_user(&headers.user.uuid, &conn) {
                            CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn)?;
                        if collection.is_writable_by_user(&headers.user.uuid, conn) {
                            CollectionCipher::save(&cipher.uuid, &collection.uuid, conn)?;
                            shared_to_collection = true;
                        } else {
                            err!("No rights to modify the collection")
@@ -750,45 +752,126 @@ fn share_cipher_by_uuid(
    update_cipher_from_data(
        &mut cipher,
        data.Cipher,
        &headers,
        headers,
        shared_to_collection,
        &conn,
        &nt,
        conn,
        nt,
        UpdateType::CipherUpdate,
    )?;

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn)))
}

#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
fn post_attachment(
/// v2 API for downloading an attachment. This just redirects the client to
/// the actual location of an attachment.
///
/// Upstream added this v2 API to support direct download of attachments from
/// their object storage service. For self-hosted instances, it basically just
/// redirects to the same location as before the v2 API.
#[get("/ciphers/<uuid>/attachment/<attachment_id>")]
fn get_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> JsonResult {
    match Attachment::find_by_id(&attachment_id, &conn) {
        Some(attachment) if uuid == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
        Some(_) => err!("Attachment doesn't belong to cipher"),
        None => err!("Attachment doesn't exist"),
    }
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct AttachmentRequestData {
    Key: String,
    FileName: String,
    FileSize: i32,
    // We check org owner/admin status via is_write_accessible_to_user(),
    // so we can just ignore this field.
    //
    // AdminRequest: bool,
}

enum FileUploadType {
    Direct = 0,
    // Azure = 1, // only used upstream
}

/// v2 API for creating an attachment associated with a cipher.
/// This redirects the client to the API it should use to upload the attachment.
/// For upstream's cloud-hosted service, it's an Azure object storage API.
/// For self-hosted instances, it's another API on the local instance.
#[post("/ciphers/<uuid>/attachment/v2", data = "<data>")]
fn post_attachment_v2(
    uuid: String,
    data: Data,
    content_type: &ContentType,
    data: JsonUpcase<AttachmentRequestData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> JsonResult {
    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
        err!("Cipher is not write accessible")
    }

    let attachment_id = crypto::generate_attachment_id();
    let data: AttachmentRequestData = data.into_inner().data;
    let attachment =
        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
    attachment.save(&conn).expect("Error saving attachment");

    let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);

    Ok(Json(json!({ // AttachmentUploadDataResponseModel
        "Object": "attachment-fileUpload",
        "AttachmentId": attachment_id,
        "Url": url,
        "FileUploadType": FileUploadType::Direct as i32,
        "CipherResponse": cipher.to_json(&headers.host, &headers.user.uuid, &conn),
        "CipherMiniResponse": null,
    })))
}
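Taken together, the v2 flow is two round trips: the client first registers the attachment metadata here, then uploads the ciphertext to the `Url` it gets back (handled by post_attachment_v2_data(), further below, via save_attachment()). A rough client-side sketch with reqwest; the base URL, auth token, and payload values are placeholders, and the `blocking`, `json`, and `multipart` crate features are assumed:

```rust
use reqwest::blocking::{multipart, Client};
use serde_json::{json, Value};

fn upload_attachment_v2(base: &str, cipher_uuid: &str, token: &str) -> reqwest::Result<()> {
    let client = Client::new();
    let body = vec![1u8, 2, 3]; // already-encrypted attachment bytes

    // Step 1: register metadata; the response carries AttachmentId and Url.
    let meta: Value = client
        .post(format!("{}/api/ciphers/{}/attachment/v2", base, cipher_uuid))
        .bearer_auth(token)
        .json(&json!({
            "Key": "<encrypted attachment key>",
            "FileName": "<encrypted file name>",
            "FileSize": body.len(),
        }))
        .send()?
        .json()?;

    // Step 2: upload the data as multipart form data to the returned Url.
    let form = multipart::Form::new()
        .part("data", multipart::Part::bytes(body).file_name("ignored-for-v2"));
    client
        .post(format!("{}/api{}", base, meta["Url"].as_str().unwrap_or_default()))
        .bearer_auth(token)
        .multipart(form)
        .send()?
        .error_for_status()?;
    Ok(())
}
```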
/// Saves the data content of an attachment to a file. This is common code
/// shared between the v2 and legacy attachment APIs.
///
/// When used with the legacy API, this function is responsible for creating
/// the attachment database record, so `attachment` is None.
///
/// When used with the v2 API, post_attachment_v2() has already created the
/// database record, which is passed in as `attachment`.
fn save_attachment(
    mut attachment: Option<Attachment>,
    cipher_uuid: String,
    data: Data,
    content_type: &ContentType,
    headers: &Headers,
    conn: &DbConn,
    nt: Notify,
) -> Result<Cipher, crate::error::Error> {
    let cipher = match Cipher::find_by_uuid(&cipher_uuid, conn) {
        Some(cipher) => cipher,
        None => err_discard!("Cipher doesn't exist", data),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
        err_discard!("Cipher is not write accessible", data)
    }

    let mut params = content_type.params();
    let boundary_pair = params.next().expect("No boundary provided");
    let boundary = boundary_pair.1;
    // In the v2 API, the attachment record has already been created,
    // so the size limit needs to be adjusted to account for that.
    let size_adjust = match &attachment {
        None => 0,                     // Legacy API
        Some(a) => a.file_size as i64, // v2 API
    };

    let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
        match CONFIG.user_attachment_limit() {
            Some(0) => err_discard!("Attachments are disabled", data),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, conn) + size_adjust;
                if left <= 0 {
                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
                    err_discard!("Attachment storage limit reached! Delete some attachments to free up space", data)
                }
                Some(left as u64)
            }
@@ -798,9 +881,9 @@ fn post_attachment(
        match CONFIG.org_attachment_limit() {
            Some(0) => err_discard!("Attachments are disabled", data),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, conn) + size_adjust;
                if left <= 0 {
                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
                    err_discard!("Attachment storage limit reached! Delete some attachments to free up space", data)
                }
                Some(left as u64)
            }
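Both quota branches compute the same quantity: remaining space = configured limit (KB) in bytes, minus what the user or org already stores, plus the size the v2 placeholder record pre-claimed, so an upload is never rejected because of its own metadata row. As a pure function (names and numbers are illustrative):

```rust
// Mirrors the arithmetic above; returns None when the upload must be refused.
fn remaining_quota(limit_kb: i64, already_stored: i64, size_adjust: i64) -> Option<u64> {
    let left = limit_kb * 1024 - already_stored + size_adjust;
    if left <= 0 {
        None
    } else {
        Some(left as u64)
    }
}

#[test]
fn placeholder_record_does_not_eat_its_own_quota() {
    // 100 MiB limit, 100 MiB recorded, but 5 MiB of that is this upload's
    // own v2 placeholder row: 5 MiB is still genuinely available.
    assert_eq!(remaining_quota(102_400, 102_400 * 1024, 5 * 1024 * 1024), Some(5 * 1024 * 1024));
}
```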
@@ -810,7 +893,12 @@ fn post_attachment(
        err_discard!("Cipher is neither owned by a user nor an organization", data);
    };

    let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher.uuid);
    let mut params = content_type.params();
    let boundary_pair = params.next().expect("No boundary provided");
    let boundary = boundary_pair.1;

    let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher_uuid);
    let mut path = PathBuf::new();

    let mut attachment_key = None;
    let mut error = None;
@@ -826,34 +914,81 @@ fn post_attachment(
                }
            }
            "data" => {
                // This is provided by the client, don't trust it
                let name = field.headers.filename.expect("No filename provided");
                // In the legacy API, this is the encrypted filename
                // provided by the client, stored to the database as-is.
                // In the v2 API, this value doesn't matter, as it was
                // already provided and stored via an earlier API call.
                let encrypted_filename = field.headers.filename;

                let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
                let path = base_path.join(&file_name);

                let size = match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
                    SaveResult::Full(SavedData::File(_, size)) => size as i32,
                    SaveResult::Full(other) => {
                        std::fs::remove_file(path).ok();
                        error = Some(format!("Attachment is not a file: {:?}", other));
                        return;
                    }
                    SaveResult::Partial(_, reason) => {
                        std::fs::remove_file(path).ok();
                        error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
                        return;
                    }
                    SaveResult::Error(e) => {
                        std::fs::remove_file(path).ok();
                        error = Some(format!("Error: {:?}", e));
                        return;
                    }
                // This random ID is used as the name of the file on disk.
                // In the legacy API, we need to generate this value here.
                // In the v2 API, we use the value from post_attachment_v2().
                let file_id = match &attachment {
                    Some(attachment) => attachment.id.clone(), // v2 API
                    None => crypto::generate_attachment_id(),  // Legacy API
                };
                path = base_path.join(&file_id);

                let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
                attachment.akey = attachment_key.clone();
                attachment.save(&conn).expect("Error saving attachment");
                let size =
                    match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
                        SaveResult::Full(SavedData::File(_, size)) => size as i32,
                        SaveResult::Full(other) => {
                            error = Some(format!("Attachment is not a file: {:?}", other));
                            return;
                        }
                        SaveResult::Partial(_, reason) => {
                            error = Some(format!("Attachment storage limit exceeded with this file: {:?}", reason));
                            return;
                        }
                        SaveResult::Error(e) => {
                            error = Some(format!("Error: {:?}", e));
                            return;
                        }
                    };

                if let Some(attachment) = &mut attachment {
                    // v2 API

                    // Check the actual size against the size initially provided by
                    // the client. Upstream allows +/- 1 MiB deviation from this
                    // size, but it's not clear when or why this is needed.
                    const LEEWAY: i32 = 1024 * 1024; // 1 MiB
                    let min_size = attachment.file_size - LEEWAY;
                    let max_size = attachment.file_size + LEEWAY;

                    if min_size <= size && size <= max_size {
                        if size != attachment.file_size {
                            // Update the attachment with the actual file size.
                            attachment.file_size = size;
                            attachment.save(conn).expect("Error updating attachment");
                        }
                    } else {
                        attachment.delete(conn).ok();

                        let err_msg = "Attachment size mismatch".to_string();
                        error!("{} (expected within [{}, {}], got {})", err_msg, min_size, max_size, size);
                        error = Some(err_msg);
                    }
                } else {
                    // Legacy API

                    if encrypted_filename.is_none() {
                        error = Some("No filename provided".to_string());
                        return;
                    }
                    if attachment_key.is_none() {
                        error = Some("No attachment key provided".to_string());
                        return;
                    }
                    let attachment = Attachment::new(
                        file_id,
                        cipher_uuid.clone(),
                        encrypted_filename.unwrap(),
                        size,
                        attachment_key.clone(),
                    );
                    attachment.save(conn).expect("Error saving attachment");
                }
            }
            _ => error!("Invalid multipart name"),
        }
@@ -861,10 +996,55 @@ fn post_attachment(
        .expect("Error processing multipart data");

    if let Some(ref e) = error {
        std::fs::remove_file(path).ok();
        err!(e);
    }

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));

    Ok(cipher)
}
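To make the LEEWAY check in save_attachment() concrete: an attachment registered via the v2 API as 5 MiB is accepted if the bytes that actually arrive land anywhere in [4 MiB, 6 MiB], and the stored size is corrected when it differs; outside that band the record is deleted and the upload rejected. A worked example (the sizes are arbitrary):

```rust
fn size_within_leeway_example() {
    const LEEWAY: i32 = 1024 * 1024; // 1 MiB, matching save_attachment()

    let declared: i32 = 5 * 1024 * 1024; // FileSize sent to post_attachment_v2()
    let actual: i32 = declared + 300_000; // what actually landed on disk

    let (min_size, max_size) = (declared - LEEWAY, declared + LEEWAY);
    assert!(min_size <= actual && actual <= max_size); // within tolerance: accepted
}
```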
/// v2 API for uploading the actual data content of an attachment.
/// This route needs a rank specified so that Rocket prioritizes the
/// /ciphers/<uuid>/attachment/v2 route, which would otherwise conflict
/// with this one.
#[post("/ciphers/<uuid>/attachment/<attachment_id>", format = "multipart/form-data", data = "<data>", rank = 1)]
fn post_attachment_v2_data(
    uuid: String,
    attachment_id: String,
    data: Data,
    content_type: &ContentType,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
        Some(attachment) if uuid == attachment.cipher_uuid => Some(attachment),
        Some(_) => err!("Attachment doesn't belong to cipher"),
        None => err!("Attachment doesn't exist"),
    };

    save_attachment(attachment, uuid, data, content_type, &headers, &conn, nt)?;

    Ok(())
}
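On Rocket 0.4, routes receive default ranks in the negative range based on how specific their paths are, and lower ranks are tried first. Giving this data route an explicit `rank = 1` guarantees it sorts after the metadata route, so a literal `v2` tail always reaches post_attachment_v2(). A minimal sketch of the same collision (handler bodies are placeholders):

```rust
// Tried first: default (negative) rank, static "v2" tail segment.
#[post("/ciphers/<uuid>/attachment/v2")]
fn v2_metadata(uuid: String) -> &'static str {
    "matches .../attachment/v2"
}

// Tried second: explicit rank 1 makes this the fallback for any other tail.
#[post("/ciphers/<uuid>/attachment/<attachment_id>", rank = 1)]
fn v2_data(uuid: String, attachment_id: String) -> &'static str {
    "matches .../attachment/<anything else>"
}
```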
/// Legacy API for creating an attachment associated with a cipher.
#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
fn post_attachment(
    uuid: String,
    data: Data,
    content_type: &ContentType,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> JsonResult {
    // Setting this as None signifies to save_attachment() that it should create
    // the attachment database record as well as saving the data to disk.
    let attachment = None;

    let cipher = save_attachment(attachment, uuid, data, content_type, &headers, &conn, nt)?;

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
@@ -984,12 +1164,22 @@ fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn:
}

#[post("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn delete_cipher_selected_post_admin(
    data: JsonUpcase<Value>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    delete_cipher_selected_post(data, headers, conn, nt)
}

#[put("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn delete_cipher_selected_put_admin(
    data: JsonUpcase<Value>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    delete_cipher_selected_put(data, headers, conn, nt)
}

@@ -1119,28 +1309,34 @@ fn delete_all(
}

fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
    let mut cipher = match Cipher::find_by_uuid(uuid, conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
        err!("Cipher can't be deleted by user")
    }

    if soft_delete {
        cipher.deleted_at = Some(Utc::now().naive_utc());
        cipher.save(&conn)?;
        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
        cipher.save(conn)?;
        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));
    } else {
        cipher.delete(&conn)?;
        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
        cipher.delete(conn)?;
        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(conn));
    }

    Ok(())
}

fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
fn _delete_multiple_ciphers(
    data: JsonUpcase<Value>,
    headers: Headers,
    conn: DbConn,
    soft_delete: bool,
    nt: Notify,
) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
@@ -1161,20 +1357,20 @@ fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbC
}

fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
    let mut cipher = match Cipher::find_by_uuid(uuid, conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
        err!("Cipher can't be restored by user")
    }

    cipher.deleted_at = None;
    cipher.save(&conn)?;
    cipher.save(conn)?;

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));
    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn)))
}

fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
@@ -1192,7 +1388,7 @@ fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &
    for uuid in uuids {
        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
            Ok(json) => ciphers.push(json.into_inner()),
            err => return err
            err => return err,
        }
    }

@@ -1210,7 +1406,7 @@ fn _delete_cipher_attachment_by_id(
    conn: &DbConn,
    nt: &Notify,
) -> EmptyResult {
    let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
    let attachment = match Attachment::find_by_id(attachment_id, conn) {
        Some(attachment) => attachment,
        None => err!("Attachment doesn't exist"),
    };
@@ -1219,17 +1415,17 @@ fn _delete_cipher_attachment_by_id(
        err!("Attachment from other cipher")
    }

    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
    let cipher = match Cipher::find_by_uuid(uuid, conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
    if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) {
        err!("Cipher cannot be deleted by user")
    }

    // Delete attachment
    attachment.delete(&conn)?;
    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    attachment.delete(conn)?;
    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn));
    Ok(())
}

src/api/core/emergency_access.rs (new file, 24 lines)
@@ -0,0 +1,24 @@
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{api::JsonResult, auth::Headers, db::DbConn};

pub fn routes() -> Vec<Route> {
    routes![get_contacts,]
}

/// This endpoint is expected to return at least something.
/// If we return an error message, that will trigger error toasts for the user.
/// To prevent this we just return an empty json result with no Data.
/// When this feature is implemented it also needs to return this empty Data
/// instead of throwing an error/4XX unless it really is an error.
#[get("/emergency-access/trusted")]
fn get_contacts(_headers: Headers, _conn: DbConn) -> JsonResult {
    debug!("Emergency access is not supported.");

    Ok(Json(json!({
        "Data": [],
        "Object": "list",
        "ContinuationToken": null
    })))
}
src/api/core/folders.rs
@@ -8,15 +8,7 @@ use crate::{
};

pub fn routes() -> Vec<rocket::Route> {
    routes![
        get_folders,
        get_folder,
        post_folders,
        post_folder,
        put_folder,
        delete_folder_post,
        delete_folder,
    ]
    routes![get_folders, get_folder, post_folders, post_folder, put_folder, delete_folder_post, delete_folder,]
}

#[get("/folders")]
src/api/core/mod.rs
@@ -1,25 +1,22 @@
mod accounts;
mod ciphers;
mod emergency_access;
mod folders;
mod organizations;
pub mod two_factor;
mod sends;
pub mod two_factor;

pub use sends::start_send_deletion_scheduler;
pub use ciphers::purge_trashed_ciphers;
pub use sends::purge_sends;

pub fn routes() -> Vec<Route> {
    let mut mod_routes = routes![
        clear_device_token,
        put_device_token,
        get_eq_domains,
        post_eq_domains,
        put_eq_domains,
        hibp_breach,
    ];
    let mut mod_routes =
        routes![clear_device_token, put_device_token, get_eq_domains, post_eq_domains, put_eq_domains, hibp_breach,];

    let mut routes = Vec::new();
    routes.append(&mut accounts::routes());
    routes.append(&mut ciphers::routes());
    routes.append(&mut emergency_access::routes());
    routes.append(&mut folders::routes());
    routes.append(&mut organizations::routes());
    routes.append(&mut two_factor::routes());
@@ -34,7 +31,6 @@ pub fn routes() -> Vec<Route> {
//
use rocket::Route;
use rocket_contrib::json::Json;
use rocket::response::Response;
use serde_json::Value;

use crate::{
@@ -42,10 +38,11 @@ use crate::{
    auth::Headers,
    db::DbConn,
    error::Error,
    util::get_reqwest_client,
};

#[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token<'a>(uuid: String) -> Response<'a> {
fn clear_device_token(uuid: String) -> &'static str {
    // This endpoint doesn't have auth header

    let _ = uuid;
@@ -54,7 +51,7 @@ fn clear_device_token<'a>(uuid: String) -> Response<'a> {
    // This only clears push token
    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
    Response::new()
    ""
}

#[put("/devices/identifier/<uuid>/token", data = "<data>")]
@@ -146,22 +143,15 @@ fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbC

#[get("/hibp/breach?<username>")]
fn hibp_breach(username: String) -> JsonResult {
    let user_agent = "Bitwarden_RS";
    let url = format!(
        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
        username
    );

    use reqwest::{blocking::Client, header::USER_AGENT};

    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
        let hibp_client = Client::builder().build()?;
        let hibp_client = get_reqwest_client();

        let res = hibp_client
            .get(&url)
            .header(USER_AGENT, user_agent)
            .header("hibp-api-key", api_key)
            .send()?;
        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?;

        // If we get a 404, return a 404, it means no breached accounts
        if res.status() == 404 {
src/api/core/organizations.rs
@@ -5,7 +5,7 @@ use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
    db::{models::*, DbConn},
    mail, CONFIG,
};
@@ -51,6 +51,7 @@ pub fn routes() -> Vec<Route> {
        get_plans,
        get_plans_tax_rates,
        import,
        post_org_keys,
    ]
}

@@ -61,6 +62,7 @@ struct OrgData {
    CollectionName: String,
    Key: String,
    Name: String,
    Keys: Option<OrgKeyData>,
    #[serde(rename = "PlanType")]
    _PlanType: NumberOrString, // Ignored, always use the same plan
}
@@ -78,6 +80,13 @@ struct NewCollectionData {
    Name: String,
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct OrgKeyData {
    EncryptedPrivateKey: String,
    PublicKey: String,
}

#[post("/organizations", data = "<data>")]
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
    if !CONFIG.is_org_creation_allowed(&headers.user.email) {
@@ -85,8 +94,14 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
    }

    let data: OrgData = data.into_inner().data;
    let (private_key, public_key) = if data.Keys.is_some() {
        let keys: OrgKeyData = data.Keys.unwrap();
        (Some(keys.EncryptedPrivateKey), Some(keys.PublicKey))
    } else {
        (None, None)
    };

    let org = Organization::new(data.Name, data.BillingEmail);
    let org = Organization::new(data.Name, data.BillingEmail, private_key, public_key);
    let mut user_org = UserOrganization::new(headers.user.uuid, org.uuid.clone());
    let collection = Collection::new(org.uuid.clone(), data.CollectionName);
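The `is_some()` / `unwrap()` pair here is safe but slightly indirect; the same unpacking can be expressed as a single match with no unwrap at all. Not what the patch does, just an equivalent formulation:

```rust
// Equivalent to the block above, with the Option consumed by pattern matching.
let (private_key, public_key) = match data.Keys {
    Some(keys) => (Some(keys.EncryptedPrivateKey), Some(keys.PublicKey)),
    None => (None, None),
};
```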
@@ -333,7 +348,12 @@ fn post_organization_collection_delete_user(
}

#[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
fn delete_organization_collection(
    org_id: String,
    col_id: String,
    _headers: ManagerHeaders,
    conn: DbConn,
) -> EmptyResult {
    match Collection::find_by_uuid(&col_id, &conn) {
        None => err!("Collection not found"),
        Some(collection) => {
@@ -392,7 +412,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeader
        .map(|col_user| {
            UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
                .unwrap()
                .to_json_user_access_restrictions(&col_user)
                .to_json_user_access_restrictions(col_user)
        })
        .collect();

@@ -426,9 +446,7 @@ fn put_collection_users(
            continue;
        }

        CollectionUser::save(&user.user_uuid, &coll_id,
                             d.ReadOnly, d.HidePasswords,
                             &conn)?;
        CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, d.HidePasswords, &conn)?;
    }

    Ok(())
@@ -443,10 +461,8 @@ struct OrgIdData {
#[get("/ciphers/organization-details?<data..>")]
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Json<Value> {
    let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();
    let ciphers_json: Vec<Value> =
        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();

    Json(json!({
        "Data": ciphers_json,
@@ -467,6 +483,32 @@ fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) ->
    }))
}

#[post("/organizations/<org_id>/keys", data = "<data>")]
fn post_org_keys(org_id: String, data: JsonUpcase<OrgKeyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let data: OrgKeyData = data.into_inner().data;

    let mut org = match Organization::find_by_uuid(&org_id, &conn) {
        Some(organization) => {
            if organization.private_key.is_some() && organization.public_key.is_some() {
                err!("Organization Keys already exist")
            }
            organization
        }
        None => err!("Can't find organization details"),
    };

    org.private_key = Some(data.EncryptedPrivateKey);
    org.public_key = Some(data.PublicKey);

    org.save(&conn)?;

    Ok(Json(json!({
        "Object": "organizationKeys",
        "PublicKey": org.public_key,
        "PrivateKey": org.private_key,
    })))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct CollectionData {
@@ -503,13 +545,13 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
    } else {
        UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
    };
    let user = match User::find_by_mail(&email, &conn) {
    let user = match User::find_by_mail(email, &conn) {
        None => {
            if !CONFIG.invitations_allowed() {
                err!(format!("User does not exist: {}", email))
            }

            if !CONFIG.is_email_domain_allowed(&email) {
            if !CONFIG.is_email_domain_allowed(email) {
                err!("Email domain not eligible for invitations")
            }

@@ -544,9 +586,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
            match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                None => err!("Collection not found in Organization"),
                Some(collection) => {
                    CollectionUser::save(&user.uuid, &collection.uuid,
                                         col.ReadOnly, col.HidePasswords,
                                         &conn)?;
                    CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, col.HidePasswords, &conn)?;
                }
            }
        }
@@ -561,7 +601,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
    };

    mail::send_invite(
        &email,
        email,
        &user.uuid,
        Some(org_id.clone()),
        Some(new_user.uuid),
@@ -631,7 +671,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    // The web-vault passes org_id and org_user_id in the URL, but we are just reading them from the JWT instead
    let data: AcceptData = data.into_inner().data;
    let token = &data.Token;
    let claims = decode_invite(&token)?;
    let claims = decode_invite(token)?;

    match User::find_by_mail(&claims.email, &conn) {
        Some(_) => {
@@ -647,6 +687,19 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
                err!("User already accepted the invitation")
            }

            let user_twofactor_disabled = TwoFactor::find_by_user(&user_org.user_uuid, &conn).is_empty();

            let policy = OrgPolicyType::TwoFactorAuthentication as i32;
            let org_twofactor_policy_enabled =
                match OrgPolicy::find_by_org_and_type(&user_org.org_uuid, policy, &conn) {
                    Some(p) => p.enabled,
                    None => false,
                };

            if org_twofactor_policy_enabled && user_twofactor_disabled {
                err!("You cannot join this organization until you enable two-step login on your user account.")
            }

            user_org.status = UserOrgStatus::Accepted as i32;
            user_org.save(&conn)?;
        }
@@ -657,7 +710,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    if CONFIG.mail_enabled() {
        let mut org_name = CONFIG.invitation_org_name();
        if let Some(org_id) = &claims.org_id {
            org_name = match Organization::find_by_uuid(&org_id, &conn) {
            org_name = match Organization::find_by_uuid(org_id, &conn) {
                Some(org) => org.name,
                None => err!("Organization not found."),
            };
@@ -801,9 +854,13 @@ fn edit_user(
            match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                None => err!("Collection not found in Organization"),
                Some(collection) => {
                    CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
                                         col.ReadOnly, col.HidePasswords,
                                         &conn)?;
                    CollectionUser::save(
                        &user_to_edit.user_uuid,
                        &collection.uuid,
                        col.ReadOnly,
                        col.HidePasswords,
                        &conn,
                    )?;
                }
            }
        }
@@ -899,16 +956,8 @@ fn post_org_import(
        .into_iter()
        .map(|cipher_data| {
            let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
            update_cipher_from_data(
                &mut cipher,
                cipher_data,
                &headers,
                false,
                &conn,
                &nt,
                UpdateType::CipherCreate,
            )
            .ok();
            update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherCreate)
                .ok();
            cipher
        })
        .collect();
@@ -989,7 +1038,13 @@ struct PolicyData {
}

#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn put_policy(
    org_id: String,
    pol_type: i32,
    data: Json<PolicyData>,
    _headers: AdminHeaders,
    conn: DbConn,
) -> JsonResult {
    let data: PolicyData = data.into_inner();

    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
@@ -997,6 +1052,24 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A
        None => err!("Invalid policy type"),
    };

    if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
        let org_list = UserOrganization::find_by_org(&org_id, &conn);

        for user_org in org_list.into_iter() {
            let user_twofactor_disabled = TwoFactor::find_by_user(&user_org.user_uuid, &conn).is_empty();

            if user_twofactor_disabled && user_org.atype < UserOrgType::Admin {
                if CONFIG.mail_enabled() {
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
                    let user = User::find_by_uuid(&user_org.user_uuid, &conn).unwrap();

                    mail::send_2fa_removed_from_org(&user.email, &org.name)?;
                }
                user_org.delete(&conn)?;
            }
        }
    }

    let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
        Some(p) => p,
        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
@@ -1127,8 +1200,7 @@ fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, con

    // If user is not part of the organization, but it exists
    } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
        if let Some (user) = User::find_by_mail(&user_data.Email, &conn) {
        if let Some(user) = User::find_by_mail(&user_data.Email, &conn) {
            let user_org_status = if CONFIG.mail_enabled() {
                UserOrgStatus::Invited as i32
            } else {
@@ -1164,7 +1236,7 @@ fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, con
    // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
    if data.OverwriteExisting {
        for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
            if let Some (user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
            if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
                if !data.Users.iter().any(|u| u.Email == user_email) {
                    user_org.delete(&conn)?;
                }
src/api/core/sends.rs
@@ -2,14 +2,15 @@ use std::{io::Read, path::Path};

use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, Data};
use rocket::{http::ContentType, response::NamedFile, Data};
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{Headers, Host},
    db::{models::*, DbConn},
    db::{models::*, DbConn, DbPool},
    util::SafeString,
    CONFIG,
};

@@ -23,25 +24,18 @@ pub fn routes() -> Vec<rocket::Route> {
        post_access_file,
        put_send,
        delete_send,
        put_remove_password
        put_remove_password,
        download_send
    ]
}

pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
    std::thread::spawn(move || {
        loop {
            if let Ok(conn) = pool.get() {
                info!("Initiating send deletion");
                for send in Send::find_all(&conn) {
                    if chrono::Utc::now().naive_utc() >= send.deletion_date {
                        send.delete(&conn).ok();
                    }
                }
            }

            std::thread::sleep(std::time::Duration::from_secs(3600));
        }
    });
pub fn purge_sends(pool: DbPool) {
    debug!("Purging sends");
    if let Ok(conn) = pool.get() {
        Send::purge(&conn);
    } else {
        error!("Failed to get DB connection while purging sends")
    }
}

#[derive(Deserialize)]
@@ -54,6 +48,7 @@ pub struct SendData {
    pub ExpirationDate: Option<DateTime<Utc>>,
    pub DeletionDate: DateTime<Utc>,
    pub Disabled: bool,
    pub HideEmail: Option<bool>,

    // Data field
    pub Name: String,
@@ -67,15 +62,36 @@ pub struct SendData {
/// modify existing ones, but is allowed to delete them.
///
/// Ref: https://bitwarden.com/help/article/policies/#disable-send
///
/// There is also a Vaultwarden-specific `sends_allowed` config setting that
/// controls this policy globally.
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let policy_type = OrgPolicyType::DisableSend;
    if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
    if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }
    Ok(())
}

/// Enforces the `DisableHideEmail` option of the `Send Options` policy.
/// A non-owner/admin user belonging to an org with this option enabled isn't
/// allowed to hide their email address from the recipient of a Bitwarden Send,
/// but is allowed to remove this option from an existing Send.
///
/// Ref: https://bitwarden.com/help/article/policies/#send-options
fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let hide_email = data.HideEmail.unwrap_or(false);
    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn) {
        err!(
            "Due to an Enterprise Policy, you are not allowed to hide your email address \
             from recipients when creating or editing a Send."
        )
    }
    Ok(())
}

fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let data_val = if data.Type == SendType::Text as i32 {
        data.Text
@@ -104,6 +120,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.hide_email = data.HideEmail;
    send.atype = data.Type;

    send.set_password(data.Password.as_deref());
@@ -116,6 +133,7 @@ fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Not
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
@@ -146,25 +164,26 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn
    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
    enforce_disable_hide_email_policy(&data.data, &headers, &conn)?;

    // Get the file length and add an extra 10% to avoid issues
    const SIZE_110_MB: u64 = 115_343_360;
    // Get the file length and add an extra 5% to avoid issues
    const SIZE_525_MB: u64 = 550_502_400;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            if left <= 0 {
                err!("Attachment size limit reached! Delete some files to open space")
                err!("Attachment storage limit reached! Delete some attachments to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_110_MB)
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
        }
        None => SIZE_110_MB,
        None => SIZE_525_MB,
    };
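The new constant is 525 MiB expressed in bytes, i.e. a 500 MiB Send ceiling plus the 5% slack the comment mentions; the old one was 110 MiB (100 MiB + 10%). A quick check of both values:

```rust
#[test]
fn send_size_constants_match_their_comments() {
    // 500 MiB + 5% = 525 MiB.
    assert_eq!(525u64 * 1024 * 1024, 550_502_400); // SIZE_525_MB
    // 100 MiB + 10% = 110 MiB (the previous limit).
    assert_eq!(110u64 * 1024 * 1024, 115_343_360); // SIZE_110_MB
}
```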
    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid.clone())?;
    let file_id: String = data_encoding::HEXLOWER.encode(&crate::crypto::get_random(vec![0; 32]));
    let file_id = crate::crypto::generate_send_id();

    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
@@ -179,13 +198,7 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn
        None => err!("No model entry present"),
    };

    let size = match data_entry
        .data
        .save()
        .memory_threshold(0)
        .size_limit(size_limit)
        .with_path(&file_path)
    {
    let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
@@ -193,7 +206,7 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
            err!(format!("Attachment storage limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
@@ -206,10 +219,7 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(
            String::from("SizeName"),
            Value::String(crate::util::get_display_size(size)),
        );
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
    }
    send.data = serde_json::to_string(&data_value)?;

@@ -268,7 +278,7 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn

    send.save(&conn)?;

    Ok(Json(send.to_json_access()))
    Ok(Json(send.to_json_access(&conn)))
}

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
@@ -316,18 +326,31 @@ fn post_access_file(

    send.save(&conn)?;

    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
    let token = crate::auth::encode_jwt(&token_claims);
    Ok(Json(json!({
        "Object": "send-fileDownload",
        "Id": file_id,
        "Url": format!("{}/sends/{}/{}", &host.host, send_id, file_id)
        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
    })))
}

#[get("/sends/<send_id>/<file_id>?<t>")]
fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
    if let Ok(claims) = crate::auth::decode_send(&t) {
        if claims.sub == format!("{}/{}", send_id, file_id) {
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok();
        }
    }
    None
}

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
@@ -365,6 +388,7 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.hide_email = data.HideEmail;
    send.disabled = data.Disabled;

    // Only change the value if it's present
src/api/core/two_factor/authenticator.rs
@@ -17,11 +17,7 @@ use crate::{
pub use crate::config::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![
        generate_authenticator,
        activate_authenticator,
        activate_authenticator_put,
    ]
    routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
}

#[post("/two-factor/get-authenticator", data = "<data>")]
@@ -118,7 +114,7 @@ pub fn validate_totp_code_str(
    _ => err!("TOTP code is not a number"),
    };

    validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
    validate_totp_code(user_uuid, totp_code, secret, ip, conn)
}

pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
@@ -129,7 +125,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
        Err(_) => err!("Invalid TOTP secret"),
    };

    let mut twofactor = match TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Authenticator as i32, &conn) {
    let mut twofactor = match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn) {
        Some(tf) => tf,
        _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
    };
@@ -141,7 +137,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
    // The amount of steps back and forward in time
    // Also check if we need to disable time drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
    let steps: i64 = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };
    let steps = !CONFIG.authenticator_disable_time_drift() as i64;

    for step in -steps..=steps {
        let time_step = current_timestamp / 30i64 + step;
@@ -160,25 +156,14 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
|
||||
// Save the last used time step so only totp time steps higher then this one are allowed.
|
||||
// This will also save a newly created twofactor if the code is correct.
|
||||
twofactor.last_used = time_step as i32;
|
||||
twofactor.save(&conn)?;
|
||||
twofactor.save(conn)?;
|
||||
return Ok(());
|
||||
} else if generated == totp_code && time_step <= twofactor.last_used as i64 {
|
||||
warn!(
|
||||
"This or a TOTP code within {} steps back and forward has already been used!",
|
||||
steps
|
||||
);
|
||||
err!(format!(
|
||||
"Invalid TOTP code! Server time: {} IP: {}",
|
||||
current_time.format("%F %T UTC"),
|
||||
ip.ip
|
||||
));
|
||||
warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
|
||||
err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
|
||||
}
|
||||
}
|
||||
|
||||
// Else no valide code received, deny access
|
||||
err!(format!(
|
||||
"Invalid TOTP code! Server time: {} IP: {}",
|
||||
current_time.format("%F %T UTC"),
|
||||
ip.ip
|
||||
));
|
||||
err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
|
||||
}
|
||||
|
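
On the `steps` rewrite above: casting a bool to i64 yields 0 or 1, so `!CONFIG.authenticator_disable_time_drift() as i64` is exactly the removed if/else. A standalone sketch of the resulting window (illustrative names, not the crate's API):

// --- illustrative example, not part of the diff ---
// `!disable_drift as i64` is 0 when drift checking is disabled and 1 otherwise,
// so the loop covers either just the current 30-second step or one step each way.
fn totp_time_steps(disable_drift: bool, current_timestamp: i64) -> Vec<i64> {
    let steps = !disable_drift as i64;
    (-steps..=steps).map(|step| current_timestamp / 30 + step).collect()
}

fn main() {
    assert_eq!(totp_time_steps(true, 90), vec![3]);        // drift disabled: current step only
    assert_eq!(totp_time_steps(false, 90), vec![2, 3, 4]); // default: previous, current, next
}
// --- end example ---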
@@ -12,6 +12,7 @@ use crate::{
        DbConn,
    },
    error::MapResult,
    util::get_reqwest_client,
    CONFIG,
};

@@ -59,7 +60,11 @@ impl DuoData {
        ik.replace_range(digits.., replaced);
        sk.replace_range(digits.., replaced);

        Self { host, ik, sk }
        Self {
            host,
            ik,
            sk,
        }
    }
}

@@ -185,9 +190,7 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
}

fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
    const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";

    use reqwest::{blocking::Client, header::*, Method};
    use reqwest::{header, Method};
    use std::str::FromStr;

    // https://duo.com/docs/authapi#api-details
@@ -199,11 +202,13 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em

    let m = Method::from_str(method).unwrap_or_default();

    Client::new()
    let client = get_reqwest_client();

    client
        .request(m, &url)
        .basic_auth(username, Some(password))
        .header(USER_AGENT, AGENT)
        .header(DATE, date)
        .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
        .header(header::DATE, date)
        .send()?
        .error_for_status()?;

@@ -221,7 +226,7 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
    let type_ = TwoFactorType::Duo as i32;

    // If the user doesn't have an entry, disabled
    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, &conn) {
    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn) {
        Some(t) => t,
        None => return DuoStatus::Disabled(DuoData::global().is_some()),
    };
@@ -242,8 +247,8 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {

// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
    let data = User::find_by_mail(email, &conn)
        .and_then(|u| get_user_duo_data(&u.uuid, &conn).data())
    let data = User::find_by_mail(email, conn)
        .and_then(|u| get_user_duo_data(&u.uuid, conn).data())
        .or_else(DuoData::global)
        .map_res("Can't fetch Duo keys")?;

@@ -338,7 +343,7 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
        err!("Invalid ikey")
    }

    let expire = match expire.parse() {
    let expire: i64 = match expire.parse() {
        Ok(e) => e,
        Err(_) => err!("Invalid expire time"),
    };
@@ -56,14 +56,14 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
/// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    let type_ = TwoFactorType::Email as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn).map_res("Two factor not found")?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?;

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;

    let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
    twofactor_data.set_token(generated_token);
    twofactor.data = twofactor_data.to_json();
    twofactor.save(&conn)?;
    twofactor.save(conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

@@ -125,11 +125,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
    let twofactor_data = EmailTokenData::new(data.Email, generated_token);

    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
    let twofactor = TwoFactor::new(
        user.uuid,
        TwoFactorType::EmailVerificationChallenge,
        twofactor_data.to_json(),
    );
    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
@@ -185,8 +181,9 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes

/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(&data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
    let mut email_data = EmailTokenData::from_json(data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
        .map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),
@@ -198,14 +195,14 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
            email_data.reset_token();
        }
        twofactor.data = email_data.to_json();
        twofactor.save(&conn)?;
        twofactor.save(conn)?;

        err!("Token is invalid")
    }

    email_data.reset_token();
    twofactor.data = email_data.to_json();
    twofactor.save(&conn)?;
    twofactor.save(conn)?;

    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
    let max_time = CONFIG.email_expiration_time() as i64;
@@ -258,7 +255,7 @@ impl EmailTokenData {
    }

    pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
        let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(&string);
        let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string);
        match res {
            Ok(x) => Ok(x),
            Err(_) => err!("Could not decode EmailTokenData from string"),
@@ -295,7 +292,7 @@ mod tests {
    fn test_obscure_email_long() {
        let email = "bytes@example.ext";

        let result = obscure_email(&email);
        let result = obscure_email(email);

        // Only first two characters should be visible.
        assert_eq!(result, "by***@example.ext");
@@ -305,7 +302,7 @@ mod tests {
    fn test_obscure_email_short() {
        let email = "byt@example.ext";

        let result = obscure_email(&email);
        let result = obscure_email(email);

        // If it's smaller than 3 characters it should only show asterisks.
        assert_eq!(result, "***@example.ext");
@@ -7,31 +7,25 @@ use crate::{
    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, User},
        DbConn,
    },
    db::{models::*, DbConn},
    mail, CONFIG,
};

pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod webauthn;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
    let mut routes = routes![
        get_twofactor,
        get_recover,
        recover,
        disable_twofactor,
        disable_twofactor_put,
    ];
    let mut routes = routes![get_twofactor, get_recover, recover, disable_twofactor, disable_twofactor_put,];

    routes.append(&mut authenticator::routes());
    routes.append(&mut duo::routes());
    routes.append(&mut email::routes());
    routes.append(&mut u2f::routes());
    routes.append(&mut webauthn::routes());
    routes.append(&mut yubikey::routes());

    routes
@@ -134,6 +128,23 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c
        twofactor.delete(&conn)?;
    }

    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).is_empty();

    if twofactor_disabled {
        let policy_type = OrgPolicyType::TwoFactorAuthentication;
        let org_list = UserOrganization::find_by_user_and_policy(&user.uuid, policy_type, &conn);

        for user_org in org_list.into_iter() {
            if user_org.atype < UserOrgType::Admin {
                if CONFIG.mail_enabled() {
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
                    mail::send_2fa_removed_from_org(&user.email, &org.name)?;
                }
                user_org.delete(&conn)?;
            }
        }
    }

    Ok(Json(json!({
        "Enabled": false,
        "Type": type_,
@@ -28,13 +28,7 @@ static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.dom
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));

pub fn routes() -> Vec<Route> {
    routes![
        generate_u2f,
        generate_u2f_challenge,
        activate_u2f,
        activate_u2f_put,
        delete_u2f,
    ]
    routes![generate_u2f, generate_u2f_challenge, activate_u2f, activate_u2f_put, delete_u2f,]
}

#[post("/two-factor/get-u2f", data = "<data>")]
@@ -100,13 +94,14 @@ struct RegistrationDef {
}

#[derive(Serialize, Deserialize)]
struct U2FRegistration {
    id: i32,
    name: String,
pub struct U2FRegistration {
    pub id: i32,
    pub name: String,
    #[serde(with = "RegistrationDef")]
    reg: Registration,
    counter: u32,
    pub reg: Registration,
    pub counter: u32,
    compromised: bool,
    pub migrated: Option<bool>,
}

impl U2FRegistration {
@@ -161,10 +156,7 @@ fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn)

    let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;

    let error_code = response
        .error_code
        .clone()
        .map_or("0".into(), NumberOrString::into_string);
    let error_code = response.error_code.clone().map_or("0".into(), NumberOrString::into_string);

    if error_code != "0" {
        err!("Error registering U2F token")
@@ -177,6 +169,7 @@ fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn)
        reg: registration,
        compromised: false,
        counter: 0,
        migrated: None,
    };

    let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;
@@ -255,7 +248,7 @@ fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -
}

fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
    TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(&conn)
    TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn)
}

fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
@@ -282,10 +275,11 @@ fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2
            reg: old_regs.remove(0),
            compromised: false,
            counter: 0,
            migrated: None,
        }];

        // Save new format
        save_u2f_registrations(user_uuid, &new_regs, &conn)?;
        save_u2f_registrations(user_uuid, &new_regs, conn)?;

        new_regs
    }
@@ -300,20 +294,13 @@ fn _old_parse_registrations(registations: &str) -> Vec<Registration> {

    let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");

    regs.into_iter()
        .map(|r| serde_json::from_value(r).unwrap())
        .map(|Helper(r)| r)
        .collect()
    regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect()
}

pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);

    let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?
        .1
        .into_iter()
        .map(|r| r.reg)
        .collect();
    let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect();

    if registrations.is_empty() {
        err!("No U2F devices registered")
@@ -324,12 +311,12 @@ pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRe

pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
    let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
    let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
    let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn);

    let challenge = match tf_challenge {
        Some(tf_challenge) => {
            let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
            tf_challenge.delete(&conn)?;
            tf_challenge.delete(conn)?;
            challenge
        }
        None => err!("Can't recover login challenge"),
@@ -345,13 +332,13 @@ pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> Emp
    match response {
        Ok(new_counter) => {
            reg.counter = new_counter;
            save_u2f_registrations(user_uuid, &registrations, &conn)?;
            save_u2f_registrations(user_uuid, &registrations, conn)?;

            return Ok(());
        }
        Err(u2f::u2ferror::U2fError::CounterTooLow) => {
            reg.compromised = true;
            save_u2f_registrations(user_uuid, &registrations, &conn)?;
            save_u2f_registrations(user_uuid, &registrations, conn)?;

            err!("This device might be compromised!");
        }
src/api/core/two_factor/webauthn.rs (new file, 386 lines)
@@ -0,0 +1,386 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn};

use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
    },
    auth::Headers,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::Error,
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![get_webauthn, generate_webauthn_challenge, activate_webauthn, activate_webauthn_put, delete_webauthn,]
}

struct WebauthnConfig {
    url: String,
    rpid: String,
}

impl WebauthnConfig {
    fn load() -> Webauthn<Self> {
        let domain = CONFIG.domain();
        Webauthn::new(Self {
            rpid: reqwest::Url::parse(&domain)
                .map(|u| u.domain().map(str::to_owned))
                .ok()
                .flatten()
                .unwrap_or_default(),
            url: domain,
        })
    }
}

impl webauthn_rs::WebauthnConfig for WebauthnConfig {
    fn get_relying_party_name(&self) -> &str {
        &self.url
    }

    fn get_origin(&self) -> &str {
        &self.url
    }

    fn get_relying_party_id(&self) -> &str {
        &self.rpid
    }

    /// We have WebAuthn configured to discourage user verification;
    /// if we leave this enabled, it will cause verification issues when a key sends UV=1.
    /// Upstream (the library they use) ignores this when set to discouraged, so we should too.
    fn get_require_uv_consistency(&self) -> bool {
        false
    }
}
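
The relying-party ID above is derived by parsing the configured domain and keeping only its host part (`reqwest::Url` re-exports `url::Url`). A sketch of that derivation, assuming the `url` crate is in scope:

// --- illustrative example, not part of the diff ---
// Ports and paths are dropped from CONFIG.domain(); a parse failure
// falls back to an empty string.
fn rpid_from(domain: &str) -> String {
    url::Url::parse(domain)
        .map(|u| u.domain().map(str::to_owned))
        .ok()
        .flatten()
        .unwrap_or_default()
}

fn main() {
    assert_eq!(rpid_from("https://vault.example.com:8443/vw/"), "vault.example.com");
    assert_eq!(rpid_from("not a url"), "");
}
// --- end example ---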
#[derive(Debug, Serialize, Deserialize)]
pub struct WebauthnRegistration {
    pub id: i32,
    pub name: String,
    pub migrated: bool,

    pub credential: Credential,
}

impl WebauthnRegistration {
    fn to_json(&self) -> Value {
        json!({
            "Id": self.id,
            "Name": self.name,
            "migrated": self.migrated,
        })
    }
}

#[post("/two-factor/get-webauthn", data = "<data>")]
fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    if !CONFIG.domain_set() {
        err!("`DOMAIN` environment variable is not set. Webauthn disabled")
    }

    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn)?;
    let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

    Ok(Json(json!({
        "Enabled": enabled,
        "Keys": registrations_json,
        "Object": "twoFactorWebAuthn"
    })))
}

#[post("/two-factor/get-webauthn-challenge", data = "<data>")]
fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)?
        .1
        .into_iter()
        .map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering
        .collect();

    let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
        headers.user.uuid.as_bytes().to_vec(),
        headers.user.email,
        headers.user.name,
        Some(registrations),
        None,
        None,
    )?;

    let type_ = TwoFactorType::WebauthnRegisterChallenge;
    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn)?;

    let mut challenge_value = serde_json::to_value(challenge.public_key)?;
    challenge_value["status"] = "ok".into();
    challenge_value["errorMessage"] = "".into();
    Ok(Json(challenge_value))
}

#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
struct EnableWebauthnData {
    Id: NumberOrString, // 1..5
    Name: String,
    MasterPasswordHash: String,
    DeviceResponse: RegisterPublicKeyCredentialCopy,
}

// This is copied from RegisterPublicKeyCredential to change the Response object's casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
struct RegisterPublicKeyCredentialCopy {
    pub Id: String,
    pub RawId: Base64UrlSafeData,
    pub Response: AuthenticatorAttestationResponseRawCopy,
    pub Type: String,
}

// This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticatorAttestationResponseRawCopy {
    pub AttestationObject: Base64UrlSafeData,
    pub ClientDataJson: Base64UrlSafeData,
}

impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
    fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
        Self {
            id: r.Id,
            raw_id: r.RawId,
            response: AuthenticatorAttestationResponseRaw {
                attestation_object: r.Response.AttestationObject,
                client_data_json: r.Response.ClientDataJson,
            },
            type_: r.Type,
        }
    }
}

// This is copied from PublicKeyCredential to change the Response object's casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct PublicKeyCredentialCopy {
    pub Id: String,
    pub RawId: Base64UrlSafeData,
    pub Response: AuthenticatorAssertionResponseRawCopy,
    pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
    pub Type: String,
}

// This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticatorAssertionResponseRawCopy {
    pub AuthenticatorData: Base64UrlSafeData,
    pub ClientDataJson: Base64UrlSafeData,
    pub Signature: Base64UrlSafeData,
    pub UserHandle: Option<Base64UrlSafeData>,
}

#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticationExtensionsClientOutputsCopy {
    #[serde(default)]
    pub Appid: bool,
}

impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
    fn from(r: PublicKeyCredentialCopy) -> Self {
        Self {
            id: r.Id,
            raw_id: r.RawId,
            response: AuthenticatorAssertionResponseRaw {
                authenticator_data: r.Response.AuthenticatorData,
                client_data_json: r.Response.ClientDataJson,
                signature: r.Response.Signature,
                user_handle: r.Response.UserHandle,
            },
            extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
                appid: e.Appid,
            }),
            type_: r.Type,
        }
    }
}

#[post("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: EnableWebauthnData = data.into_inner().data;
    let mut user = headers.user;

    if !user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    // Retrieve and delete the saved challenge state
    let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
    let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        Some(tf) => {
            let state: RegistrationState = serde_json::from_str(&tf.data)?;
            tf.delete(&conn)?;
            state
        }
        None => err!("Can't recover challenge"),
    };

    // Verify the credentials with the saved state
    let (credential, _data) =
        WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;

    let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn)?.1;
    // TODO: Check for repeated ID's
    registrations.push(WebauthnRegistration {
        id: data.Id.into_i32()?,
        name: data.Name,
        migrated: false,

        credential,
    });

    // Save the registrations and return them
    TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?).save(&conn)?;
    _generate_recover_code(&mut user, &conn);

    let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
    Ok(Json(json!({
        "Enabled": true,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

#[put("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_webauthn(data, headers, conn)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteU2FData {
    Id: NumberOrString,
    MasterPasswordHash: String,
}

#[delete("/two-factor/webauthn", data = "<data>")]
fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
    let id = data.data.Id.into_i32()?;
    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn) {
        Some(tf) => tf,
        None => err!("Webauthn data not found!"),
    };

    let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;

    let item_pos = match data.iter().position(|r| r.id == id) {
        Some(p) => p,
        None => err!("Webauthn entry not found"),
    };

    let removed_item = data.remove(item_pos);
    tf.data = serde_json::to_string(&data)?;
    tf.save(&conn)?;
    drop(tf);

    // If entry is migrated from u2f, delete the u2f entry as well
    if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn) {
        use crate::api::core::two_factor::u2f::U2FRegistration;
        let mut data: Vec<U2FRegistration> = match serde_json::from_str(&u2f.data) {
            Ok(d) => d,
            Err(_) => err!("Error parsing U2F data"),
        };

        data.retain(|r| r.reg.key_handle != removed_item.credential.cred_id);
        let new_data_str = serde_json::to_string(&data)?;

        u2f.data = new_data_str;
        u2f.save(&conn)?;
    }

    let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();

    Ok(Json(json!({
        "Enabled": true,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

pub fn get_webauthn_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
    let type_ = TwoFactorType::Webauthn as i32;
    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
        Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
        None => Ok((false, Vec::new())), // If no data, return empty list
    }
}

pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
    // Load saved credentials
    let creds: Vec<Credential> =
        get_webauthn_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.credential).collect();

    if creds.is_empty() {
        err!("No Webauthn devices registered")
    }

    // Generate a challenge based on the credentials
    let ext = RequestAuthenticationExtensions::builder().appid(format!("{}/app-id.json", &CONFIG.domain())).build();
    let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?;

    // Save the challenge state for later validation
    TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
        .save(conn)?;

    // Return challenge to the clients
    Ok(Json(serde_json::to_value(response.public_key)?))
}

pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
    let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
        Some(tf) => {
            let state: AuthenticationState = serde_json::from_str(&tf.data)?;
            tf.delete(conn)?;
            state
        }
        None => err!("Can't recover login challenge"),
    };

    let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
    let rsp: PublicKeyCredential = rsp.data.into();

    let mut registrations = get_webauthn_registrations(user_uuid, conn)?.1;

    // If the credential we received is migrated from U2F, enable the U2F compatibility
    //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
    let (cred_id, auth_data) = WebauthnConfig::load().authenticate_credential(&rsp, &state)?;

    for reg in &mut registrations {
        if &reg.credential.cred_id == cred_id {
            reg.credential.counter = auth_data.counter;

            TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
                .save(conn)?;
            return Ok(());
        }
    }

    err!("Credential not present")
}
src/api/icons.rs (286 changed lines)
@@ -3,55 +3,71 @@ use std::{
    fs::{create_dir_all, remove_file, symlink_metadata, File},
    io::prelude::*,
    net::{IpAddr, ToSocketAddrs},
    sync::RwLock,
    sync::{Arc, RwLock},
    time::{Duration, SystemTime},
};

use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::{blocking::Client, blocking::Response, header, Url};
use rocket::{http::ContentType, http::Cookie, response::Content, Route};
use reqwest::{blocking::Client, blocking::Response, header};
use rocket::{http::ContentType, response::Content, Route};

use crate::{error::Error, util::Cached, CONFIG};
use crate::{
    error::Error,
    util::{get_reqwest_client_builder, Cached},
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![icon]
}

const ALLOWED_CHARS: &str = "_-.";

static CLIENT: Lazy<Client> = Lazy::new(|| {
    // Generate the default headers
    let mut default_headers = header::HeaderMap::new();
    default_headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15"));
    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en-US,en;q=0.8"));
    default_headers
        .insert(header::USER_AGENT, header::HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
    default_headers
        .insert(header::ACCEPT, header::HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en,*;q=0.1"));
    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::ACCEPT, header::HeaderValue::from_static("text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8"));

    // Reuse the client between requests
    Client::builder()
    get_reqwest_client_builder()
        .cookie_provider(Arc::new(Jar::default()))
        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
        .default_headers(default_headers)
        .build()
        .unwrap()
        .expect("Failed to build icon client")
});

// Build Regex only once since this takes a lot of time.
static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

// Special HashMap which holds the user-defined Regex to speed up matching.
static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));

#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

    if !is_valid_domain(&domain) {
        warn!("Invalid domain: {}", domain);
        return None;
        return Cached::ttl(
            Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
        );
    }

    get_icon(&domain).map(|icon| Cached::ttl(Content(ContentType::new("image", "x-icon"), icon), CONFIG.icon_cache_ttl()))
    match get_icon(&domain) {
        Some((icon, icon_type)) => {
            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
        }
        _ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
    }
}

/// Returns whether the provided domain is valid.
@@ -59,8 +75,10 @@ fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>>
/// This does some manual checks and makes use of Url to do some basic checking.
/// Domains can't be longer than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool {
    const ALLOWED_CHARS: &str = "_-.";

    // If parsing the domain fails using Url, it will not work with reqwest.
    if let Err(parse_error) = Url::parse(format!("https://{}", domain).as_str()) {
    if let Err(parse_error) = url::Url::parse(format!("https://{}", domain).as_str()) {
        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
        return false;
    } else if domain.is_empty()
@@ -69,7 +87,10 @@ fn is_valid_domain(domain: &str) -> bool {
        || domain.starts_with('-')
        || domain.ends_with('-')
    {
        debug!("Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'", domain);
        debug!(
            "Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'",
            domain
        );
        return false;
    } else if domain.len() > 255 {
        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
@@ -228,7 +249,7 @@ fn is_domain_blacklisted(domain: &str) -> bool {
    };

    // Use the pre-generated Regex stored in a Lazy HashMap.
    if regex.is_match(&domain) {
    if regex.is_match(domain) {
        warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
        is_blacklisted = true;
    }
@@ -238,7 +259,7 @@ fn is_domain_blacklisted(domain: &str) -> bool {
    is_blacklisted
}

fn get_icon(domain: &str) -> Option<Vec<u8>> {
fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

    // Check for expiration of negatively cached copy
@@ -247,7 +268,11 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {
    }

    if let Some(icon) = get_cached_icon(&path) {
        return Some(icon);
        let icon_type = match get_icon_type(&icon) {
            Some(x) => x,
            _ => "x-icon",
        };
        return Some((icon, icon_type.to_string()));
    }

    if CONFIG.disable_icon_download() {
@@ -255,10 +280,10 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {
    }

    // Get the icon, or None in case of error
    match download_icon(&domain) {
        Ok(icon) => {
    match download_icon(domain) {
        Ok((icon, icon_type)) => {
            save_icon(&path, &icon);
            Some(icon)
            Some((icon, icon_type.unwrap_or("x-icon").to_string()))
        }
        Err(e) => {
            error!("Error downloading icon: {:?}", e);
@@ -319,7 +344,6 @@ fn icon_is_expired(path: &str) -> bool {
    expired.unwrap_or(true)
}

#[derive(Debug)]
struct Icon {
    priority: u8,
    href: String,
@@ -327,12 +351,64 @@ struct Icon {

impl Icon {
    const fn new(priority: u8, href: String) -> Self {
        Self { href, priority }
        Self {
            priority,
            href,
        }
    }
}

fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &Url) {
    if let markup5ever_rcdom::NodeData::Element { name, attrs, .. } = &node.data {
/// Iterates over the HTML document to find <base href="http://domain.tld">.
/// When found it will stop the iteration, and the found base href is returned via `base_href`.
///
/// # Arguments
/// * `node` - A parsed HTML document via html5ever::parse_document()
/// * `base_href` - A mutable url::Url which will be overwritten when a base href tag has been found.
///
fn get_base_href(node: &std::rc::Rc<markup5ever_rcdom::Node>, base_href: &mut url::Url) -> bool {
    if let markup5ever_rcdom::NodeData::Element {
        name,
        attrs,
        ..
    } = &node.data
    {
        if name.local.as_ref() == "base" {
            let attrs = attrs.borrow();
            for attr in attrs.iter() {
                let attr_name = attr.name.local.as_ref();
                let attr_value = attr.value.as_ref();

                if attr_name == "href" {
                    debug!("Found base href: {}", attr_value);
                    *base_href = match base_href.join(attr_value) {
                        Ok(href) => href,
                        _ => base_href.clone(),
                    };
                    return true;
                }
            }
            return true;
        }
    }

    // TODO: Might want to limit the recursion depth?
    for child in node.children.borrow().iter() {
        // Check if we got a true back and stop the iter.
        // This means we found a <base> tag and can stop processing the html.
        if get_base_href(child, base_href) {
            return true;
        }
    }
    false
}
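
Since relative icon hrefs are resolved with `url::Url::join`, a discovered `<base href>` changes every later resolution. A small sketch of those join semantics (illustrative URLs, not part of the diff):

// --- illustrative example, not part of the diff ---
// Once a <base> tag is found, relative favicon hrefs resolve against it
// instead of the page URL the HTML was fetched from.
fn main() {
    let page = url::Url::parse("https://example.com/login/index.html").unwrap();
    let base = page.join("https://cdn.example.com/assets/").unwrap();
    assert_eq!(
        base.join("icon-32.png").unwrap().as_str(),
        "https://cdn.example.com/assets/icon-32.png"
    );
}
// --- end example ---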
fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &url::Url) {
    if let markup5ever_rcdom::NodeData::Element {
        name,
        attrs,
        ..
    } = &node.data
    {
        if name.local.as_ref() == "link" {
            let mut has_rel = false;
            let mut href = None;
@@ -343,7 +419,8 @@ fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Ve
                let attr_name = attr.name.local.as_ref();
                let attr_value = attr.value.as_ref();

                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) {
                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value)
                {
                    has_rel = true;
                } else if attr_name == "href" {
                    href = Some(attr_value);
@@ -354,7 +431,7 @@ fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Ve

            if has_rel {
                if let Some(inner_href) = href {
                    if let Ok(full_href) = url.join(&inner_href).map(|h| h.into_string()) {
                    if let Ok(full_href) = url.join(inner_href).map(String::from) {
                        let priority = get_icon_priority(&full_href, sizes);
                        icons.push(Icon::new(priority, full_href));
                    }
@@ -371,12 +448,11 @@ fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Ve

struct IconUrlResult {
    iconlist: Vec<Icon>,
    cookies: String,
    referer: String,
}

/// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response.
/// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
/// Returns an IconUrlResult which holds a Vector IconList and a string which holds the referer.
/// There will always be two items within the iconlist which hold http(s)://domain.tld/favicon.ico.
/// This does not mean that the location exists, but it is the default location browsers use.
///
/// # Argument
@@ -384,8 +460,8 @@ struct IconUrlResult {
///
/// # Example
/// ```
/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
/// let icon_result = get_icon_url("github.com")?;
/// let icon_result = get_icon_url("vaultwarden.discourse.group")?;
/// ```
fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    // Default URL with secure and insecure schemes
@@ -433,49 +509,30 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {

    // Create the iconlist
    let mut iconlist: Vec<Icon> = Vec::new();

    // Create the cookie_str to fill it all the cookies from the response
    // These cookies can be used to request/download the favicon image.
    // Some sites have extra security in place with for example XSRF Tokens.
    let mut cookie_str = "".to_string();
    let mut referer = "".to_string();
    let mut referer = String::from("");

    if let Ok(content) = resp {
        // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
        let url = content.url().clone();

        // Get all the cookies and pass it on to the next function.
        // Needed for XSRF Cookies for example (like @ mijn.ing.nl)
        let raw_cookies = content.headers().get_all("set-cookie");
        cookie_str = raw_cookies
            .iter()
            .filter_map(|raw_cookie| raw_cookie.to_str().ok())
            .map(|cookie_str| {
                if let Ok(cookie) = Cookie::parse(cookie_str) {
                    format!("{}={}; ", cookie.name(), cookie.value())
                } else {
                    String::new()
                }
            })
            .collect::<String>();

        // Set the referer to be used on the final request, some sites check this.
        // Mostly used to prevent direct linking and other security reasons.
        referer = url.as_str().to_string();

        // Add the default favicon.ico to the list with the domain the content responded from.
        iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
        iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap())));

        // 512KB should be more than enough for the HTML, though as we only really need
        // the HTML header, it could potentially be reduced even further
        let mut limited_reader = content.take(512 * 1024);
        // 384KB should be more than enough for the HTML, though we only really need the HTML header.
        let mut limited_reader = content.take(384 * 1024);

        use html5ever::tendril::TendrilSink;
        let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
            .from_utf8()
            .read_from(&mut limited_reader)?;

        get_favicons_node(&dom.document, &mut iconlist, &url);
        let mut base_url: url::Url = url;
        get_base_href(&dom.document, &mut base_url);
        get_favicons_node(&dom.document, &mut iconlist, &base_url);
    } else {
        // Add the default favicon.ico to the list with just the given domain
        iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
@@ -486,33 +543,27 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    iconlist.sort_by_key(|x| x.priority);

    // There always is an icon in the list, so no need to check if it exists, and just return the first one
    Ok(IconUrlResult{
    Ok(IconUrlResult {
        iconlist,
        cookies: cookie_str,
        referer
        referer,
    })
}

fn get_page(url: &str) -> Result<Response, Error> {
    get_page_with_cookies(url, "", "")
    get_page_with_referer(url, "")
}

fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<Response, Error> {
    if is_domain_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
    if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) {
        err!("Favicon rel linked to a blacklisted domain!");
    }

    let mut client = CLIENT.get(url);
    if !cookie_str.is_empty() {
        client = client.header("Cookie", cookie_str)
    }
    if !referer.is_empty() {
        client = client.header("Referer", referer)
    }

    client.send()?
        .error_for_status()
        .map_err(Into::into)
    client.send()?.error_for_status().map_err(Into::into)
}

/// Returns an Integer with the priority of the type of the icon to prefer.
@@ -540,7 +591,7 @@ fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
        1
    } else if width == 64 {
        2
    } else if (24..=128).contains(&width) {
    } else if (24..=192).contains(&width) {
        3
    } else if width == 16 {
        4
@@ -594,14 +645,15 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
    (width, height)
}

fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
    if is_domain_blacklisted(domain) {
        err!("Domain is blacklisted", domain)
    }

    let icon_result = get_icon_url(&domain)?;
    let icon_result = get_icon_url(domain)?;

    let mut buffer = Vec::new();
    let mut icon_type: Option<&str> = None;

    use data_url::DataUrl;

@@ -613,29 +665,43 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
            Ok((body, _fragment)) => {
                // Also check if the size is at least 67 bytes, which seems to be the smallest PNG I could create
                if body.len() >= 67 {
                    // Check if the icon type is allowed, else try an icon from the list.
                    icon_type = get_icon_type(&body);
                    if icon_type.is_none() {
                        debug!("Icon from {} data:image uri, is not a valid image type", domain);
                        continue;
                    }
                    info!("Extracted icon from data:image uri for {}", domain);
                    buffer = body;
                    break;
                }
            }
            _ => warn!("data uri is invalid"),
            _ => warn!("Extracted icon from data:image uri is invalid"),
        };
    } else {
        match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
        match get_page_with_referer(&icon.href, &icon_result.referer) {
            Ok(mut res) => {
                info!("Downloaded icon from {}", icon.href);
                res.copy_to(&mut buffer)?;
                // Check if the icon type is allowed, else try an icon from the list.
                icon_type = get_icon_type(&buffer);
                if icon_type.is_none() {
                    buffer.clear();
                    debug!("Icon from {}, is not a valid image type", icon.href);
                    continue;
                }
                info!("Downloaded icon from {}", icon.href);
                break;
            },
            }
            _ => warn!("Download failed for {}", icon.href),
        };
    }
}

    if buffer.is_empty() {
        err!("Empty response")
        err!("Empty response downloading icon")
    }

    Ok(buffer)
    Ok((buffer, icon_type))
}

fn save_icon(path: &str, icon: &[u8]) {
@@ -647,7 +713,65 @@ fn save_icon(path: &str, icon: &[u8]) {
            create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
        }
        Err(e) => {
            info!("Icon save error: {:?}", e);
            warn!("Icon save error: {:?}", e);
        }
    }
}

fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [137, 80, 78, 71, ..] => Some("png"),
        [0, 0, 1, 0, ..] => Some("x-icon"),
        [82, 73, 70, 70, ..] => Some("webp"),
        [255, 216, 255, ..] => Some("jpeg"),
        [71, 73, 70, 56, ..] => Some("gif"),
        [66, 77, ..] => Some("bmp"),
        _ => None,
    }
}
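
The slice patterns in get_icon_type match on file magic numbers (a PNG starts with 0x89 'P' 'N' 'G', an ICO with 00 00 01 00, a BMP with 'B' 'M'). Two illustrative assertions, assuming the function above is in scope (not part of the diff):

// --- illustrative example, not part of the diff ---
fn main() {
    assert_eq!(get_icon_type(&[137, 80, 78, 71, 13, 10, 26, 10]), Some("png"));
    assert_eq!(get_icon_type(b"GIF89a"), Some("gif"));
}
// --- end example ---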
/// This is an implementation of the default Cookie Jar from Reqwest and reqwest_cookie_store built by pfernie.
/// The default cookie jar used by Reqwest keeps all the cookies based upon the Max-Age or Expires, which could be a long time.
/// That could be used for tracking; to prevent this we force the lifespan of the cookies to always be max two minutes.
/// A Cookie Jar is needed because some sites force a redirect with cookies to verify if a request uses cookies or not.
use cookie_store::CookieStore;
#[derive(Default)]
pub struct Jar(RwLock<CookieStore>);

impl reqwest::cookie::CookieStore for Jar {
    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
        use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
        use time::Duration;

        let mut cookie_store = self.0.write().unwrap();
        let cookies = cookie_headers.filter_map(|val| {
            std::str::from_utf8(val.as_bytes())
                .map_err(RawCookieParseError::from)
                .and_then(RawCookie::parse)
                .map(|mut c| {
                    c.set_expires(None);
                    c.set_max_age(Some(Duration::minutes(2)));
                    c.into_owned()
                })
                .ok()
        });
        cookie_store.store_response_cookies(cookies, url);
    }

    fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
        use bytes::Bytes;

        let cookie_store = self.0.read().unwrap();
        let s = cookie_store
            .get_request_values(url)
            .map(|(name, value)| format!("{}={}", name, value))
            .collect::<Vec<_>>()
            .join("; ");

        if s.is_empty() {
            return None;
        }

        header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
    }
}
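
This Jar is what the icon CLIENT above hands to reqwest via `cookie_provider`; condensed, the wiring looks like this (a sketch mirroring the CLIENT initializer, not additional code in the diff):

// --- illustrative example, mirroring the CLIENT initializer above ---
// reqwest takes the store as an Arc'd trait object, so redirect chains during
// icon fetches carry the (two-minute, non-persisted) cookies along.
fn build_icon_client() -> reqwest::blocking::Client {
    get_reqwest_client_builder()
        .cookie_provider(std::sync::Arc::new(Jar::default()))
        .build()
        .expect("Failed to build icon client")
}
// --- end example ---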
@@ -72,7 +72,8 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
||||
"Kdf": user.client_kdf_type,
|
||||
"KdfIterations": user.client_kdf_iter,
|
||||
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
|
||||
"scope": "api offline_access"
|
||||
"scope": "api offline_access",
|
||||
"unofficialServer": true,
|
||||
})))
|
||||
}
|
||||
|
||||
@@ -87,34 +88,28 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
||||
let username = data.username.as_ref().unwrap();
|
||||
let user = match User::find_by_mail(username, &conn) {
|
||||
Some(user) => user,
|
||||
None => err!(
|
||||
"Username or password is incorrect. Try again",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username)
|
||||
),
|
||||
None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
|
||||
};
|
||||
|
||||
// Check password
|
||||
let password = data.password.as_ref().unwrap();
|
||||
if !user.check_valid_password(password) {
|
||||
err!(
|
||||
"Username or password is incorrect. Try again",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username)
|
||||
)
|
||||
err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||
}
|
||||
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
err!(
|
||||
"This user has been disabled",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username)
|
||||
)
|
||||
err!("This user has been disabled", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||
}
|
||||
|
||||
let now = Local::now();
|
||||
|
||||
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
||||
let now = now.naive_utc();
|
||||
if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
|
||||
if user.last_verifying_at.is_none()
|
||||
|| now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds()
|
||||
> CONFIG.signups_verify_resend_time() as i64
|
||||
{
|
||||
let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
|
||||
if resend_limit == 0 || user.login_verify_count < resend_limit {
|
||||
// We want to send another email verification if we require signups to verify
|
||||
@@ -134,15 +129,12 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
||||
}
|
||||
|
||||
         // We still want the login to fail until they actually verified the email address
-        err!(
-            "Please verify your email before trying again.",
-            format!("IP: {}. Username: {}.", ip.ip, username)
-        )
+        err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username))
     }

     let (mut device, new_device) = get_device(&data, &conn, &user);

-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &ip, &conn)?;
+    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn)?;

     if CONFIG.mail_enabled() && new_device {
         if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
@@ -168,11 +160,12 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         "Key": user.akey,
         "PrivateKey": user.private_key,
         //"TwoFactorToken": "11122233333444555666777888999"

         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
         "ResetMasterPassword": false,// TODO: Same as above
-        "scope": "api offline_access"
+        "scope": "api offline_access",
+        "unofficialServer": true,
     });

     if let Some(token) = twofactor_token {
@@ -192,7 +185,7 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool)

     let mut new_device = false;
     // Find device or create new
-    let device = match Device::find_by_uuid(&device_id, &conn) {
+    let device = match Device::find_by_uuid(&device_id, conn) {
         Some(device) => {
             // Check if owned device, and recreate if not
             if device.user_uuid != user.uuid {
@@ -234,9 +227,7 @@ fn twofactor_auth(
         None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA token not provided"),
     };

-    let selected_twofactor = twofactors
-        .into_iter()
-        .find(|tf| tf.atype == selected_id && tf.enabled);
+    let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);

     use crate::api::core::two_factor as _tf;
     use crate::crypto::ct_eq;
@@ -245,18 +236,27 @@ fn twofactor_auth(
     let mut remember = data.two_factor_remember.unwrap_or(0);

     match TwoFactorType::from_i32(selected_id) {
-        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?,
+        Some(TwoFactorType::Authenticator) => {
+            _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?
+        }
         Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
+        Some(TwoFactorType::Webauthn) => _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn)?,
         Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
-        Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
-        Some(TwoFactorType::Email) => _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
+        Some(TwoFactorType::Duo) => {
+            _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?
+        }
+        Some(TwoFactorType::Email) => {
+            _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?
+        }

         Some(TwoFactorType::Remember) => {
             match device.twofactor_remember {
                 Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
                     remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
                 }
-                _ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided"),
+                _ => {
+                    err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided")
+                }
             }
         }
         _ => err!("Invalid two factor provider"),
@@ -310,8 +310,13 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
             });
         }

+        Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
+            let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn)?;
+            result["TwoFactorProviders2"][provider.to_string()] = request.0;
+        }
+
         Some(TwoFactorType::Duo) => {
-            let email = match User::find_by_uuid(user_uuid, &conn) {
+            let email = match User::find_by_uuid(user_uuid, conn) {
                 Some(u) => u.email,
                 None => err!("User does not exist"),
             };
@@ -325,7 +330,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         }

         Some(tf_type @ TwoFactorType::YubiKey) => {
-            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
+            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
                 Some(tf) => tf,
                 None => err!("No YubiKey devices registered"),
             };
@@ -340,14 +345,14 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         Some(tf_type @ TwoFactorType::Email) => {
             use crate::api::core::two_factor as _tf;

-            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
+            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
                 Some(tf) => tf,
                 None => err!("No twofactor email registered"),
             };

             // Send email immediately if email is the only 2FA option
             if providers.len() == 1 {
-                _tf::email::send_token(&user_uuid, &conn)?
+                _tf::email::send_token(user_uuid, conn)?
             }

             let email_data = EmailTokenData::from_json(&twofactor.data)?;
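An aside on the `Remember` arm above: the stored remember token is compared with `crypto::ct_eq` rather than `==`, since a naive comparison can short-circuit on the first mismatching byte and leak timing information about the secret. The helper itself is not part of this diff; a minimal sketch of a constant-time comparison with the same shape, assuming the `ring` crate:

use ring::constant_time::verify_slices_are_equal;

// Compares every byte regardless of where the first mismatch occurs,
// so the runtime does not reveal how much of the token was correct.
fn ct_eq(a: &str, b: &str) -> bool {
    verify_slices_are_equal(a.as_bytes(), b.as_bytes()).is_ok()
}

fn main() {
    assert!(ct_eq("remember-token", "remember-token"));
    assert!(!ct_eq("remember-token", "forged-token!!"));
}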
@@ -10,8 +10,9 @@ use serde_json::Value;

 pub use crate::api::{
     admin::routes as admin_routes,
+    core::purge_sends,
+    core::purge_trashed_ciphers,
     core::routes as core_routes,
-    core::start_send_deletion_scheduler,
     icons::routes as icons_routes,
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
@@ -50,13 +51,14 @@ impl NumberOrString {
         }
     }

-    fn into_i32(self) -> ApiResult<i32> {
+    #[allow(clippy::wrong_self_convention)]
+    fn into_i32(&self) -> ApiResult<i32> {
         use std::num::ParseIntError as PIE;
         match self {
-            NumberOrString::Number(n) => Ok(n),
-            NumberOrString::String(s) => s
-                .parse()
-                .map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
+            NumberOrString::Number(n) => Ok(*n),
+            NumberOrString::String(s) => {
+                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
+            }
         }
     }
 }
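The `NumberOrString` hunk above is a common clippy `wrong_self_convention` situation: the method keeps its `into_i32` name for API stability but now takes `&self`, so the lint must be allowed explicitly and the number is copied out of the borrow with `*n`. A standalone sketch of the same pattern (hypothetical type, not the project's code):

enum NumOrStr {
    Num(i32),
    Str(String),
}

impl NumOrStr {
    // clippy expects `into_*` methods to consume `self`; borrowing instead
    // requires silencing the lint and dereferencing the matched number.
    #[allow(clippy::wrong_self_convention)]
    fn into_i32(&self) -> Result<i32, std::num::ParseIntError> {
        match self {
            NumOrStr::Num(n) => Ok(*n),
            NumOrStr::Str(s) => s.parse(),
        }
    }
}

fn main() {
    assert_eq!(NumOrStr::Num(7).into_i32(), Ok(7));
    assert_eq!(NumOrStr::Str("42".into()).into_i32(), Ok(42));
}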
@@ -4,12 +4,7 @@ use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value as JsonValue;

-use crate::{
-    api::EmptyResult,
-    auth::Headers,
-    db::DbConn,
-    Error, CONFIG,
-};
+use crate::{api::EmptyResult, auth::Headers, db::DbConn, Error, CONFIG};

 pub fn routes() -> Vec<Route> {
     routes![negotiate, websockets_err]
@@ -19,12 +14,16 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

 #[get("/hub")]
 fn websockets_err() -> EmptyResult {
-    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok() {
-        err!("
+    if CONFIG.websocket_enabled()
+        && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
+    {
+        err!(
+            "
 ###########################################################
 '/notifications/hub' should be proxied to the websocket server or notifications won't work.
 Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
-###########################################################################################\n")
+###########################################################################################\n"
+        )
     } else {
         Err(Error::empty())
     }
@@ -204,9 +203,7 @@ impl Handler for WsHandler {
         let handler_insert = self.out.clone();
         let handler_update = self.out.clone();

-        self.users
-            .map
-            .upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));
+        self.users.map.upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));

         // Schedule a ping to keep the connection alive
         self.out.timeout(PING_MS, PING)
@@ -216,7 +213,11 @@ impl Handler for WsHandler {
         if let Message::Text(text) = msg.clone() {
             let json = &text[..text.len() - 1]; // Remove last char

-            if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
+            if let Ok(InitialMessage {
+                protocol,
+                version,
+            }) = from_str::<InitialMessage>(json)
+            {
                 if &protocol == "messagepack" && version == 1 {
                     return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                 }
@@ -295,10 +296,7 @@ impl WebSocketUsers {
     // NOTE: The last modified date needs to be updated before calling these methods
     pub fn send_user_update(&self, ut: UpdateType, user: &User) {
         let data = create_update(
-            vec![
-                ("UserId".into(), user.uuid.clone().into()),
-                ("Date".into(), serialize_date(user.updated_at)),
-            ],
+            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             ut,
         );

@@ -334,7 +332,7 @@ impl WebSocketUsers {
         );

         for uuid in user_uuids {
-            self.send_update(&uuid, &data).ok();
+            self.send_update(uuid, &data).ok();
         }
     }
 }
@@ -410,12 +408,10 @@ pub fn start_notification_server() -> WebSocketUsers {

     if CONFIG.websocket_enabled() {
         thread::spawn(move || {
-            let settings = ws::Settings {
-                max_connections: 500,
-                queue_size: 2,
-                panic_on_internal: false,
-                ..Default::default()
-            };
+            let mut settings = ws::Settings::default();
+            settings.max_connections = 500;
+            settings.queue_size = 2;
+            settings.panic_on_internal = false;

             ws::Builder::new()
                 .with_settings(settings)
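The reflowed `websockets_err` keeps its one-shot behavior: `compare_exchange(true, false, ...)` succeeds for exactly one caller, so the proxy warning is printed once per process rather than on every `/notifications/hub` request. Reduced to its core:

use std::sync::atomic::{AtomicBool, Ordering};

static SHOW_MSG: AtomicBool = AtomicBool::new(true);

// Returns true for exactly one caller, even under concurrency: the first
// thread to swap the flag from `true` to `false` wins the exchange.
fn should_warn_once() -> bool {
    SHOW_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
}

fn main() {
    assert!(should_warn_once());   // first call: warn
    assert!(!should_warn_once());  // subsequent calls: stay quiet
}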
@@ -4,13 +4,17 @@ use rocket::{http::ContentType, response::content::Content, response::NamedFile,
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::{error::Error, util::Cached, CONFIG};
+use crate::{
+    error::Error,
+    util::{Cached, SafeString},
+    CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     // If addding more routes here, consider also adding them to
     // crate::utils::LOGGED_ROUTES to make sure they appear in the log
     if CONFIG.web_vault_enabled() {
-        routes![web_index, app_id, web_files, attachments, sends, alive, static_files]
+        routes![web_index, app_id, web_files, attachments, alive, static_files]
     } else {
         routes![attachments, alive, static_files]
     }
@@ -55,14 +59,9 @@ fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
     Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok())
 }

-#[get("/attachments/<uuid>/<file..>")]
-fn attachments(uuid: String, file: PathBuf) -> Option<NamedFile> {
-    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file)).ok()
-}
-
-#[get("/sends/<send_id>/<file_id>")]
-fn sends(send_id: String, file_id: String) -> Option<NamedFile> {
-    NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok()
+#[get("/attachments/<uuid>/<file_id>")]
+fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
+    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).ok()
 }

 #[get("/alive")]
@@ -78,16 +77,22 @@ fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
     match filename.as_ref() {
         "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
         "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
-        "shield-white.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/shield-white.png"))),
         "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
         "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
+        "vaultwarden-icon.png" => {
+            Ok(Content(ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png")))
+        }

         "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
-        "bootstrap-native.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
+        "bootstrap-native.js" => {
+            Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js")))
+        }
         "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
         "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
         "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
-        "jquery-3.5.1.slim.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.5.1.slim.js"))),
+        "jquery-3.6.0.slim.js" => {
+            Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.0.slim.js")))
+        }
         _ => err!(format!("Static file not found: {}", filename)),
     }
 }
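The interesting change in `attachments` is not the formatting but the parameter types: the raw `String`/`PathBuf` pair becomes `SafeString`, and the catch-all `<file..>` segment is gone, so request segments can no longer smuggle path separators or `..` into the `attachments_folder()` join. `SafeString` itself lives in `crate::util` and is not shown in this diff; a hypothetical sketch of the idea it embodies, shown only to illustrate the purpose of the type:

use std::path::Path;

struct SafeString(String);

impl SafeString {
    fn new(s: &str) -> Option<Self> {
        // Allow only characters that can never form `..`, `/` or `\` sequences,
        // so joining onto the attachments folder cannot traverse upward.
        if !s.is_empty() && s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_') {
            Some(SafeString(s.to_owned()))
        } else {
            None
        }
    }
}

fn main() {
    assert!(SafeString::new("4d6a2175-8d27-4a3c").is_some());
    assert!(SafeString::new("../../etc/passwd").is_none());
    let base = Path::new("data/attachments");
    if let Some(id) = SafeString::new("abc123") {
        println!("{}", base.join(id.0).display()); // stays inside the base folder
    }
}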
src/auth.rs (137 changed lines)
@@ -19,22 +19,34 @@ const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

 pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
 static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));

 pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
 static JWT_INVITE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|invite", CONFIG.domain_origin()));
 static JWT_DELETE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|delete", CONFIG.domain_origin()));
 static JWT_VERIFYEMAIL_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin()));
 static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.domain_origin()));
-static PRIVATE_RSA_KEY: Lazy<Vec<u8>> = Lazy::new(|| match read_file(&CONFIG.private_rsa_key()) {
-    Ok(key) => key,
-    Err(e) => panic!("Error loading private RSA Key.\n Error: {}", e),
+static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));
+
+static PRIVATE_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
+    read_file(&CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
 });
-static PUBLIC_RSA_KEY: Lazy<Vec<u8>> = Lazy::new(|| match read_file(&CONFIG.public_rsa_key()) {
-    Ok(key) => key,
-    Err(e) => panic!("Error loading public RSA Key.\n Error: {}", e),
+static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
+    EncodingKey::from_rsa_pem(&PRIVATE_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{}", e))
 });
+static PUBLIC_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
+    read_file(&CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
+});
+static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
+    DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{}", e))
+});
+
+pub fn load_keys() {
+    Lazy::force(&PRIVATE_RSA_KEY);
+    Lazy::force(&PUBLIC_RSA_KEY);
+}

 pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-    match jsonwebtoken::encode(&JWT_HEADER, claims, &EncodingKey::from_rsa_der(&PRIVATE_RSA_KEY)) {
+    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
         Ok(token) => token,
         Err(e) => panic!("Error encoding jwt {}", e),
     }
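Two things happen in this hunk: the key files move from DER to PEM, and the parsed `EncodingKey`/`DecodingKey` values are themselves cached in `Lazy` statics instead of being re-derived from raw bytes on every encode/decode. `load_keys()` then forces both at startup so a malformed key aborts boot instead of panicking on the first login. A minimal sketch of that force-at-startup pattern, assuming the `once_cell` crate and a stand-in file path:

use once_cell::sync::Lazy;

// Stand-in for the expensive parse (`EncodingKey::from_rsa_pem` in the diff).
static PRIVATE_KEY: Lazy<Vec<u8>> = Lazy::new(|| {
    // Runs at most once, on first access; a panic here is fatal.
    std::fs::read("data/rsa_key.pem").unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
});

pub fn load_keys() {
    // Forcing the Lazy at boot turns "fails on the first token" into "fails at startup".
    Lazy::force(&PRIVATE_KEY);
}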
@@ -52,10 +64,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
     };

     let token = token.replace(char::is_whitespace, "");

-    jsonwebtoken::decode(&token, &DecodingKey::from_rsa_der(&PUBLIC_RSA_KEY), &validation)
-        .map(|d| d.claims)
-        .map_res("Error decoding JWT")
+    jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation).map(|d| d.claims).map_res("Error decoding JWT")
 }

 pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
@@ -66,18 +75,22 @@ pub fn decode_invite(token: &str) -> Result<InviteJwtClaims, Error> {
     decode_jwt(token, JWT_INVITE_ISSUER.to_string())
 }

-pub fn decode_delete(token: &str) -> Result<DeleteJwtClaims, Error> {
+pub fn decode_delete(token: &str) -> Result<BasicJwtClaims, Error> {
     decode_jwt(token, JWT_DELETE_ISSUER.to_string())
 }

-pub fn decode_verify_email(token: &str) -> Result<VerifyEmailJwtClaims, Error> {
+pub fn decode_verify_email(token: &str) -> Result<BasicJwtClaims, Error> {
     decode_jwt(token, JWT_VERIFYEMAIL_ISSUER.to_string())
 }

-pub fn decode_admin(token: &str) -> Result<AdminJwtClaims, Error> {
+pub fn decode_admin(token: &str) -> Result<BasicJwtClaims, Error> {
     decode_jwt(token, JWT_ADMIN_ISSUER.to_string())
 }

+pub fn decode_send(token: &str) -> Result<BasicJwtClaims, Error> {
+    decode_jwt(token, JWT_SEND_ISSUER.to_string())
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct LoginJwtClaims {
     // Not before
@@ -147,7 +160,7 @@ pub fn generate_invite_claims(
 }

 #[derive(Debug, Serialize, Deserialize)]
-pub struct DeleteJwtClaims {
+pub struct BasicJwtClaims {
     // Not before
     pub nbf: i64,
     // Expiration time
@@ -158,9 +171,9 @@ pub struct DeleteJwtClaims {
     pub sub: String,
 }

-pub fn generate_delete_claims(uuid: String) -> DeleteJwtClaims {
+pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
     let time_now = Utc::now().naive_utc();
-    DeleteJwtClaims {
+    BasicJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::days(5)).timestamp(),
         iss: JWT_DELETE_ISSUER.to_string(),
@@ -168,21 +181,9 @@ pub fn generate_delete_claims(uuid: String) -> DeleteJwtClaims {
     }
 }

-#[derive(Debug, Serialize, Deserialize)]
-pub struct VerifyEmailJwtClaims {
-    // Not before
-    pub nbf: i64,
-    // Expiration time
-    pub exp: i64,
-    // Issuer
-    pub iss: String,
-    // Subject
-    pub sub: String,
-}
-
-pub fn generate_verify_email_claims(uuid: String) -> DeleteJwtClaims {
+pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
     let time_now = Utc::now().naive_utc();
-    DeleteJwtClaims {
+    BasicJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::days(5)).timestamp(),
         iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
@@ -190,21 +191,9 @@ pub fn generate_verify_email_claims(uuid: String) -> DeleteJwtClaims {
     }
 }

-#[derive(Debug, Serialize, Deserialize)]
-pub struct AdminJwtClaims {
-    // Not before
-    pub nbf: i64,
-    // Expiration time
-    pub exp: i64,
-    // Issuer
-    pub iss: String,
-    // Subject
-    pub sub: String,
-}
-
-pub fn generate_admin_claims() -> AdminJwtClaims {
+pub fn generate_admin_claims() -> BasicJwtClaims {
     let time_now = Utc::now().naive_utc();
-    AdminJwtClaims {
+    BasicJwtClaims {
         nbf: time_now.timestamp(),
         exp: (time_now + Duration::minutes(20)).timestamp(),
         iss: JWT_ADMIN_ISSUER.to_string(),
@@ -212,6 +201,16 @@ pub fn generate_admin_claims() -> AdminJwtClaims {
     }
 }

+pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
+    let time_now = Utc::now().naive_utc();
+    BasicJwtClaims {
+        nbf: time_now.timestamp(),
+        exp: (time_now + Duration::minutes(2)).timestamp(),
+        iss: JWT_SEND_ISSUER.to_string(),
+        sub: format!("{}/{}", send_id, file_id),
+    }
+}
+
 //
 // Bearer token authentication
 //
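Worth noting about `generate_send_claims`: the token is scoped to a single `send_id/file_id` pair and lives for only two minutes, so a leaked download URL goes stale almost immediately. The window arithmetic in isolation (chrono only; the JWT signing is omitted):

use chrono::{Duration, Utc};

fn main() {
    let time_now = Utc::now().naive_utc();
    let nbf = time_now.timestamp();
    let exp = (time_now + Duration::minutes(2)).timestamp();
    let sub = format!("{}/{}", "send-id", "file-id");

    // A download link derived from these claims expires 120 seconds later
    // and cannot be replayed against any other send/file pair.
    assert_eq!(exp - nbf, 120);
    println!("sub = {}, valid for {}s", sub, exp - nbf);
}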
@@ -223,10 +222,9 @@ use crate::db::{
 };

 pub struct Host {
-    pub host: String
+    pub host: String,
 }

-
 impl<'a, 'r> FromRequest<'a, 'r> for Host {
     type Error = &'static str;

@@ -261,7 +259,9 @@ impl<'a, 'r> FromRequest<'a, 'r> for Host {
             format!("{}://{}", protocol, host)
         };

-        Outcome::Success(Host { host })
+        Outcome::Success(Host {
+            host,
+        })
     }
 }

@@ -317,18 +317,27 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
         };

         if user.security_stamp != claims.sstamp {
-            if let Some(stamp_exception) = user
-                .stamp_exception
-                .as_deref()
-                .and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
+            if let Some(stamp_exception) =
+                user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
             {
                 let current_route = match request.route().and_then(|r| r.name) {
                     Some(name) => name,
                     _ => err_handler!("Error getting current route for stamp exception"),
                 };

-                // Check if both match, if not this route is not allowed with the current security stamp.
-                if stamp_exception.route != current_route {
+                // Check if the stamp exception has expired first.
+                // Then, check if the current route matches any of the allowed routes.
+                // After that check the stamp in exception matches the one in the claims.
+                if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
+                    // If the stamp exception has been expired remove it from the database.
+                    // This prevents checking this stamp exception for new requests.
+                    let mut user = user;
+                    user.reset_stamp_exception();
+                    if let Err(e) = user.save(&conn) {
+                        error!("Error updating user: {:#?}", e);
+                    }
+                    err_handler!("Stamp exception is expired")
+                } else if !stamp_exception.routes.contains(&current_route.to_string()) {
                     err_handler!("Invalid security stamp: Current route and exception route do not match")
                 } else if stamp_exception.security_stamp != claims.sstamp {
                     err_handler!("Invalid security stamp for matched stamp exception")
@@ -338,7 +347,11 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
             }
         }

-        Outcome::Success(Headers { host, device, user })
+        Outcome::Success(Headers {
+            host,
+            device,
+            user,
+        })
     }
 }

@@ -506,7 +519,11 @@ impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
         };

         if !headers.org_user.has_full_access() {
-            match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) {
+            match CollectionUser::find_by_collection_and_user(
+                &col_id,
+                &headers.org_user.user_uuid,
+                &conn,
+            ) {
                 Some(_) => (),
                 None => err_handler!("The current user isn't a manager for this collection"),
             }
@@ -636,10 +653,10 @@ impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
             None
         };

-        let ip = ip
-            .or_else(|| req.remote().map(|r| r.ip()))
-            .unwrap_or_else(|| "0.0.0.0".parse().unwrap());
+        let ip = ip.or_else(|| req.remote().map(|r| r.ip())).unwrap_or_else(|| "0.0.0.0".parse().unwrap());

-        Outcome::Success(ClientIp { ip })
+        Outcome::Success(ClientIp {
+            ip,
+        })
     }
 }
src/config.rs (113 changed lines)
@@ -57,6 +57,8 @@ macro_rules! make_config {

         _env: ConfigBuilder,
         _usr: ConfigBuilder,
+
+        _overrides: Vec<String>,
     }

     #[derive(Debug, Clone, Default, Deserialize, Serialize)]
@@ -79,20 +81,16 @@ macro_rules! make_config {
                 dotenv::Error::Io(ioerr) => match ioerr.kind() {
                     std::io::ErrorKind::NotFound => {
                         println!("[INFO] No .env file found.\n");
-                        ()
                     },
                     std::io::ErrorKind::PermissionDenied => {
                         println!("[WARNING] Permission Denied while trying to read the .env file!\n");
-                        ()
                     },
                     _ => {
                         println!("[WARNING] Reading the .env file failed:\n{:?}\n", ioerr);
-                        ()
                     }
                 },
                 _ => {
                     println!("[WARNING] Reading the .env file failed:\n{:?}\n", e);
-                    ()
                 }
             }
         };
@@ -113,8 +111,7 @@ macro_rules! make_config {

         /// Merges the values of both builders into a new builder.
         /// If both have the same element, `other` wins.
-        fn merge(&self, other: &Self, show_overrides: bool) -> Self {
-            let mut overrides = Vec::new();
+        fn merge(&self, other: &Self, show_overrides: bool, overrides: &mut Vec<String>) -> Self {
            let mut builder = self.clone();
            $($(
                if let v @Some(_) = &other.$name {
@@ -176,9 +173,9 @@ macro_rules! make_config {
        )+)+

        pub fn prepare_json(&self) -> serde_json::Value {
-            let (def, cfg) = {
+            let (def, cfg, overriden) = {
                let inner = &self.inner.read().unwrap();
-                (inner._env.build(), inner.config.clone())
+                (inner._env.build(), inner.config.clone(), inner._overrides.clone())
            };

            fn _get_form_type(rust_type: &str) -> &'static str {
@@ -210,6 +207,7 @@ macro_rules! make_config {
                        "default": def.$name,
                        "type": _get_form_type(stringify!($ty)),
                        "doc": _get_doc(concat!($($doc),+)),
+                        "overridden": overriden.contains(&stringify!($name).to_uppercase()),
                    }, )+
                ]}, )+ ])
            }
@@ -224,6 +222,15 @@ macro_rules! make_config {
                stringify!($name): make_config!{ @supportstr $name, cfg.$name, $ty, $none_action },
            )+)+ })
        }
+
+        pub fn get_overrides(&self) -> Vec<String> {
+            let overrides = {
+                let inner = &self.inner.read().unwrap();
+                inner._overrides.clone()
+            };
+
+            overrides
+        }
    }
    };
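The thread through these hunks: `merge` now reports which keys the user config overrode into a caller-supplied `Vec<String>`, the result is stored in the new `_overrides` field, and `prepare_json`/`get_overrides` expose it so the admin page can flag a setting as environment-controlled. A standalone sketch of the merge-and-record idea (a hypothetical two-field builder, not the macro's generated code, and with simplified override semantics):

#[derive(Clone, Default)]
struct Builder {
    domain: Option<String>,
    signups_allowed: Option<bool>,
}

impl Builder {
    /// Merges `other` into `self`; `other` wins, and every key it sets
    /// is recorded so callers can display it as overridden.
    fn merge(&self, other: &Self, overrides: &mut Vec<String>) -> Self {
        let mut builder = self.clone();
        if let v @ Some(_) = &other.domain {
            builder.domain = v.clone();
            overrides.push("DOMAIN".into());
        }
        if let v @ Some(_) = &other.signups_allowed {
            builder.signups_allowed = *v;
            overrides.push("SIGNUPS_ALLOWED".into());
        }
        builder
    }
}

fn main() {
    let env = Builder { domain: Some("https://env.example".into()), ..Default::default() };
    let usr = Builder { domain: Some("https://usr.example".into()), signups_allowed: Some(false) };
    let mut overrides = Vec::new();
    let merged = env.merge(&usr, &mut overrides);
    assert_eq!(merged.domain.as_deref(), Some("https://usr.example"));
    assert_eq!(overrides, ["DOMAIN", "SIGNUPS_ALLOWED"]);
}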
@@ -316,6 +323,17 @@ make_config! {
         /// Websocket port
         websocket_port: u16, false, def, 3012;
     },
+    jobs {
+        /// Job scheduler poll interval |> How often the job scheduler thread checks for jobs to run.
+        /// Set to 0 to globally disable scheduled jobs.
+        job_poll_interval_ms: u64, false, def, 30_000;
+        /// Send purge schedule |> Cron schedule of the job that checks for Sends past their deletion date.
+        /// Defaults to hourly. Set blank to disable this job.
+        send_purge_schedule: String, false, def, "0 5 * * * *".to_string();
+        /// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently.
+        /// Defaults to daily. Set blank to disable this job.
+        trash_purge_schedule: String, false, def, "0 5 0 * * *".to_string();
+    },

     /// General settings
     settings {
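The default schedules use the 6-field, seconds-first cron form ("0 5 * * * *" fires at second 0, minute 5 of every hour), and `job_poll_interval_ms` governs how often the scheduler thread wakes up to check them. A sketch of how such a poll loop is typically wired, assuming the `job_scheduler` crate:

use job_scheduler::{Job, JobScheduler};
use std::time::Duration;

fn main() {
    let mut sched = JobScheduler::new();

    // "0 5 * * * *" = second 0, minute 5, every hour: the send-purge default.
    sched.add(Job::new("0 5 * * * *".parse().unwrap(), || {
        println!("purging sends past their deletion date");
    }));

    loop {
        sched.tick(); // runs any job whose schedule has come due
        std::thread::sleep(Duration::from_millis(30_000)); // JOB_POLL_INTERVAL_MS
    }
}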
@@ -331,19 +349,28 @@ make_config! {
         /// Enable web vault
         web_vault_enabled: bool, false, def, true;

+        /// Allow Sends |> Controls whether users are allowed to create Bitwarden Sends.
+        /// This setting applies globally to all users. To control this on a per-org basis instead, use the "Disable Send" org policy.
+        sends_allowed: bool, true, def, true;
+
         /// HIBP Api Key |> HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
         hibp_api_key: Pass, true, option;

-        /// Per-user attachment limit (KB) |> Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+        /// Per-user attachment storage limit (KB) |> Max kilobytes of attachment storage allowed per user. When this limit is reached, the user will not be allowed to upload further attachments.
         user_attachment_limit: i64, true, option;
-        /// Per-organization attachment limit (KB) |> Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+        /// Per-organization attachment storage limit (KB) |> Max kilobytes of attachment storage allowed per org. When this limit is reached, org members will not be allowed to upload further attachments for ciphers owned by that org.
         org_attachment_limit: i64, true, option;

+        /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
+        /// If unset, trashed items are not auto-deleted. This setting applies globally, so make
+        /// sure to inform all users of any changes to this setting.
+        trash_auto_delete_days: i64, true, option;
+
         /// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
         /// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
         /// otherwise it will delete them and they won't be downloaded again.
         disable_icon_download: bool, true, def, false;
-        /// Allow new signups |> Controls whether new users can register. Users can be invited by the bitwarden_rs admin even if this is disabled
+        /// Allow new signups |> Controls whether new users can register. Users can be invited by the vaultwarden admin even if this is disabled
         signups_allowed: bool, true, def, true;
         /// Require email verification on signups. This will prevent logins from succeeding until the address has been verified
         signups_verify: bool, true, def, false;
@@ -361,15 +388,16 @@ make_config! {
         /// Password iterations |> Number of server-side passwords hashing iterations.
         /// The changes only apply when a user changes their password. Not recommended to lower the value
         password_iterations: i32, true, def, 100_000;
-        /// Show password hints |> Controls if the password hint should be shown directly in the web page.
-        /// Otherwise, if email is disabled, there is no way to see the password hint
-        show_password_hint: bool, true, def, true;
+        /// Show password hint |> Controls whether a password hint should be shown directly in the web page
+        /// if SMTP service is not configured. Not recommended for publicly-accessible instances as this
+        /// provides unauthenticated access to potentially sensitive data.
+        show_password_hint: bool, true, def, false;

         /// Admin page token |> The token used to authenticate in this very same page. Changing it here won't deauthorize the current session
         admin_token: Pass, true, option;

         /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
-        invitation_org_name: String, true, def, "Bitwarden_RS".to_string();
+        invitation_org_name: String, true, def, "Vaultwarden".to_string();
     },

     /// Advanced settings
@@ -418,7 +446,7 @@ make_config! {
         /// Log level
         log_level: String, false, def, "Info".to_string();

-        /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using bitwarden_rs on some exotic filesystems,
+        /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
         /// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
         enable_db_wal: bool, false, def, true;

@@ -473,7 +501,7 @@ make_config! {
         /// From Address
         smtp_from: String, true, def, String::new();
         /// From Name
-        smtp_from_name: String, true, def, "Bitwarden_RS".to_string();
+        smtp_from_name: String, true, def, "Vaultwarden".to_string();
         /// Username
         smtp_username: String, true, option;
         /// Password
@@ -485,7 +513,7 @@ make_config! {
         /// Server name sent during HELO |> By default this value should be is on the machine's hostname, but might need to be changed in case it trips some anti-spam filters
         helo_name: String, true, option;
         /// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
-        smtp_debug: bool, true, def, false;
+        smtp_debug: bool, false, def, false;
         /// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
         smtp_accept_invalid_certs: bool, true, def, false;
         /// Accept Invalid Hostnames (Know the risks!) |> DANGEROUS: Allow invalid hostnames. This option introduces significant vulnerabilities to man-in-the-middle attacks!
@@ -511,10 +539,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

     let limit = 256;
     if cfg.database_max_conns < 1 || cfg.database_max_conns > limit {
-        err!(format!(
-            "`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.",
-            limit,
-        ));
+        err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.", limit,));
     }

     let dom = cfg.domain.to_lowercase();
@@ -582,7 +607,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

     // Check if the icon blacklist regex is valid
     if let Some(ref r) = cfg.icon_blacklist_regex {
-        let validate_regex = Regex::new(&r);
+        let validate_regex = Regex::new(r);
         match validate_regex {
             Ok(_) => (),
             Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {:#?}", e)),
@@ -622,7 +647,8 @@ impl Config {
         let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();

         // Create merged config, config file overwrites env
-        let builder = _env.merge(&_usr, true);
+        let mut _overrides = Vec::new();
+        let builder = _env.merge(&_usr, true, &mut _overrides);

         // Fill any missing with defaults
         let config = builder.build();
@@ -634,6 +660,7 @@ impl Config {
             config,
             _env,
             _usr,
+            _overrides,
         }),
     })
 }
@@ -649,9 +676,10 @@ impl Config {
         let config_str = serde_json::to_string_pretty(&builder)?;

         // Prepare the combined config
+        let mut overrides = Vec::new();
         let config = {
             let env = &self.inner.read().unwrap()._env;
-            env.merge(&builder, false).build()
+            env.merge(&builder, false, &mut overrides).build()
         };
         validate_config(&config)?;

@@ -660,6 +688,7 @@ impl Config {
         let mut writer = self.inner.write().unwrap();
         writer.config = config;
         writer._usr = builder;
+        writer._overrides = overrides;
     }

     //Save to file
@@ -673,7 +702,8 @@ impl Config {
     pub fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
         let builder = {
             let usr = &self.inner.read().unwrap()._usr;
-            usr.merge(&other, false)
+            let mut _overrides = Vec::new();
+            usr.merge(&other, false, &mut _overrides)
         };
         self.update_config(builder)
     }
@@ -734,19 +764,17 @@ impl Config {
             let mut writer = self.inner.write().unwrap();
             writer.config = config;
             writer._usr = usr;
+            writer._overrides = Vec::new();
         }

         Ok(())
     }

     pub fn private_rsa_key(&self) -> String {
-        format!("{}.der", CONFIG.rsa_key_filename())
-    }
-    pub fn private_rsa_key_pem(&self) -> String {
         format!("{}.pem", CONFIG.rsa_key_filename())
     }
     pub fn public_rsa_key(&self) -> String {
-        format!("{}.pub.der", CONFIG.rsa_key_filename())
+        format!("{}.pub.pem", CONFIG.rsa_key_filename())
     }
     pub fn mail_enabled(&self) -> bool {
         let inner = &self.inner.read().unwrap().config;
@@ -819,6 +847,10 @@ where
     }

     // First register default templates here
+    reg!("email/email_header");
+    reg!("email/email_footer");
+    reg!("email/email_footer_text");
+
     reg!("email/change_email", ".html");
     reg!("email/delete_account", ".html");
     reg!("email/invite_accepted", ".html");
@@ -826,6 +858,7 @@ where
     reg!("email/new_device_logged_in", ".html");
     reg!("email/pw_hint_none", ".html");
     reg!("email/pw_hint_some", ".html");
+    reg!("email/send_2fa_removed_from_org", ".html");
     reg!("email/send_org_invite", ".html");
     reg!("email/twofactor_email", ".html");
     reg!("email/verify_email", ".html");
@@ -855,9 +888,7 @@ fn case_helper<'reg, 'rc>(
     rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h
-        .param(0)
-        .ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
+    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
     let value = param.value().clone();

     if h.params().iter().skip(1).any(|x| x.value() == &value) {
@@ -874,21 +905,15 @@ fn js_escape_helper<'reg, 'rc>(
     _rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h
-        .param(0)
-        .ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;
+    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;

-    let no_quote = h
-        .param(1)
-        .is_some();
+    let no_quote = h.param(1).is_some();

-    let value = param
-        .value()
-        .as_str()
-        .ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;
+    let value =
+        param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;

     let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
-    if ! no_quote {
+    if !no_quote {
         escaped_value = format!("&quot;{}&quot;", escaped_value);
     }

@@ -3,6 +3,7 @@
 //
 use std::num::NonZeroU32;

+use data_encoding::HEXLOWER;
 use ring::{digest, hmac, pbkdf2};

 use crate::error::Error;
@@ -28,8 +29,6 @@ pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterati
 // HMAC
 //
 pub fn hmac_sign(key: &str, data: &str) -> String {
-    use data_encoding::HEXLOWER;
-
     let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, key.as_bytes());
     let signature = hmac::sign(&key, data.as_bytes());

@@ -47,13 +46,25 @@ pub fn get_random_64() -> Vec<u8> {
 pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
     use ring::rand::{SecureRandom, SystemRandom};

-    SystemRandom::new()
-        .fill(&mut array)
-        .expect("Error generating random values");
+    SystemRandom::new().fill(&mut array).expect("Error generating random values");

     array
 }

+pub fn generate_id(num_bytes: usize) -> String {
+    HEXLOWER.encode(&get_random(vec![0; num_bytes]))
+}
+
+pub fn generate_send_id() -> String {
+    // Send IDs are globally scoped, so make them longer to avoid collisions.
+    generate_id(32) // 256 bits
+}
+
+pub fn generate_attachment_id() -> String {
+    // Attachment IDs are scoped to a cipher, so they can be smaller.
+    generate_id(10) // 80 bits
+}
+
 pub fn generate_token(token_size: u32) -> Result<String, Error> {
     // A u64 can represent all whole numbers up to 19 digits long.
     if token_size > 19 {
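The new ID helpers make the size/scope trade-off explicit: hex-encoding `n` random bytes yields a string of `2 * n` characters, so send IDs get 256 bits of entropy (globally scoped) while attachment IDs make do with 80 (scoped to one cipher). A quick check of that shape, using the same `ring` and `data_encoding` calls:

use data_encoding::HEXLOWER;
use ring::rand::{SecureRandom, SystemRandom};

fn generate_id(num_bytes: usize) -> String {
    let mut array = vec![0u8; num_bytes];
    SystemRandom::new().fill(&mut array).expect("Error generating random values");
    HEXLOWER.encode(&array)
}

fn main() {
    assert_eq!(generate_id(32).len(), 64); // send IDs: 256 bits
    assert_eq!(generate_id(10).len(), 20); // attachment IDs: 80 bits
    println!("{}", generate_id(10));
}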
src/db/mod.rs (110 changed lines)
@@ -1,6 +1,3 @@
-use std::process::Command;
-
-use chrono::prelude::*;
 use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
 use rocket::{
     http::Status,
@@ -25,7 +22,6 @@ pub mod __mysql_schema;
 #[path = "schemas/postgresql/schema.rs"]
 pub mod __postgresql_schema;

-
 // This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported
 macro_rules! generate_connections {
     ( $( $name:ident: $ty:ty ),+ ) => {
@@ -110,7 +106,6 @@ impl DbConnType {
     }
 }

-
 #[macro_export]
 macro_rules! db_run {
     // Same for all dbs
@@ -119,7 +114,7 @@ macro_rules! db_run {
     };

     // Different code for each db
-    ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
+    ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
         #[allow(unused)] use diesel::prelude::*;
         match $conn {
             $($(
@@ -133,7 +128,7 @@ macro_rules! db_run {
                 $body
             },
         )+)+
         }
-    }
+    }}
     };

     // Same for all dbs
@@ -144,6 +139,7 @@ macro_rules! db_run {
     // Different code for each db
     ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
         #[allow(unused)] use diesel::prelude::*;
+        #[allow(unused_variables)]
         match $conn {
             $($(
                 #[cfg($db)]
@@ -155,13 +151,30 @@ macro_rules! db_run {
     };
 }

-
 pub trait FromDb {
     type Output;
+    #[allow(clippy::wrong_self_convention)]
     fn from_db(self) -> Self::Output;
 }

+impl<T: FromDb> FromDb for Vec<T> {
+    type Output = Vec<T::Output>;
+    #[allow(clippy::wrong_self_convention)]
+    #[inline(always)]
+    fn from_db(self) -> Self::Output {
+        self.into_iter().map(crate::db::FromDb::from_db).collect()
+    }
+}
+
+impl<T: FromDb> FromDb for Option<T> {
+    type Output = Option<T::Output>;
+    #[allow(clippy::wrong_self_convention)]
+    #[inline(always)]
+    fn from_db(self) -> Self::Output {
+        self.map(crate::db::FromDb::from_db)
+    }
+}
+
 // For each struct eg. Cipher, we create a CipherDb inside a module named __$db_model (where $db is sqlite, mysql or postgresql),
 // to implement the Diesel traits. We also provide methods to convert between them and the basic structs. Later, that module will be auto imported when using db_run!
 #[macro_export]
@@ -202,18 +215,9 @@ macro_rules! db_object {

         impl crate::db::FromDb for [<$name Db>] {
             type Output = super::$name;
+            #[allow(clippy::wrong_self_convention)]
             #[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }
         }
-
-        impl crate::db::FromDb for Vec<[<$name Db>]> {
-            type Output = Vec<super::$name>;
-            #[inline(always)] fn from_db(self) -> Self::Output { self.into_iter().map(crate::db::FromDb::from_db).collect() }
-        }
-
-        impl crate::db::FromDb for Option<[<$name Db>]> {
-            type Output = Option<super::$name>;
-            #[inline(always)] fn from_db(self) -> Self::Output { self.map(crate::db::FromDb::from_db) }
-        }
     }
     };
 }
@@ -221,51 +225,34 @@ macro_rules! db_object {
 // Reexport the models, needs to be after the macros are defined so it can access them
 pub mod models;

-/// Creates a back-up of the database using sqlite3
-pub fn backup_database() -> Result<(), Error> {
-    use std::path::Path;
-    let db_url = CONFIG.database_url();
-    let db_path = Path::new(&db_url).parent().unwrap();
-
-    let now: DateTime<Utc> = Utc::now();
-    let file_date = now.format("%Y%m%d").to_string();
-    let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");
-
-    Command::new("sqlite3")
-        .current_dir(db_path)
-        .args(&["db.sqlite3", &backup_command])
-        .output()
-        .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");
-
-    Ok(())
-}
-
-
-use diesel::sql_types::Text;
-#[derive(QueryableByName,Debug)]
-struct SqlVersion {
-    #[sql_type = "Text"]
-    version: String,
+/// Creates a back-up of the sqlite database
+/// MySQL/MariaDB and PostgreSQL are not supported.
+pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
+    db_run! {@raw conn:
+        postgresql, mysql {
+            err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
+        }
+        sqlite {
+            use std::path::Path;
+            let db_url = CONFIG.database_url();
+            let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
+            let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
+            diesel::sql_query(format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)).execute(conn)?;
+            Ok(())
+        }
+    }
 }

 /// Get the SQL Server version
 pub fn get_sql_server_version(conn: &DbConn) -> String {
     db_run! {@raw conn:
         postgresql, mysql {
-            match diesel::sql_query("SELECT version() AS version;").get_result::<SqlVersion>(conn).ok() {
-                Some(v) => {
-                    v.version
-                },
-                _ => "Unknown".to_string()
-            }
+            no_arg_sql_function!(version, diesel::sql_types::Text);
+            diesel::select(version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
         sqlite {
-            match diesel::sql_query("SELECT sqlite_version() AS version;").get_result::<SqlVersion>(conn).ok() {
-                Some(v) => {
-                    v.version
-                },
-                _ => "Unknown".to_string()
-            }
+            no_arg_sql_function!(sqlite_version, diesel::sql_types::Text);
+            diesel::select(sqlite_version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
     }
 }
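The backup rewrite replaces shelling out to an external `sqlite3` binary with SQLite's own `VACUUM INTO` statement (available since SQLite 3.27), which writes a consistent copy through the already-open connection and lets MySQL/PostgreSQL fail loudly instead of silently producing nothing. The same SQL sketched with `rusqlite` to show it in isolation (illustrative only; the project issues it through diesel as shown above):

use rusqlite::Connection;

fn backup(db: &str, dest: &str) -> rusqlite::Result<()> {
    let conn = Connection::open(db)?;
    // Produces a transactionally consistent copy while the database stays live,
    // with no dependency on a `sqlite3` binary being on the PATH.
    conn.execute_batch(&format!("VACUUM INTO '{}'", dest))?;
    Ok(())
}

fn main() -> rusqlite::Result<()> {
    backup("db.sqlite3", "db_backup.sqlite3")
}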
@@ -308,8 +295,7 @@ mod sqlite_migrations {

         use diesel::{Connection, RunQueryDsl};
         // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
-        let connection =
-            diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?;
+        let connection = diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?;
         // Disable Foreign Key Checks during migration

         // Scoped to a connection.
@@ -319,9 +305,7 @@ mod sqlite_migrations {

         // Turn on WAL in SQLite
         if crate::CONFIG.enable_db_wal() {
-            diesel::sql_query("PRAGMA journal_mode=wal")
-                .execute(&connection)
-                .expect("Failed to turn on WAL");
+            diesel::sql_query("PRAGMA journal_mode=wal").execute(&connection).expect("Failed to turn on WAL");
         }

         embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
@@ -337,8 +321,7 @@ mod mysql_migrations {
     pub fn run_migrations() -> Result<(), super::Error> {
         use diesel::{Connection, RunQueryDsl};
         // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
-        let connection =
-            diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
+        let connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
         // Disable Foreign Key Checks during migration

         // Scoped to a connection/session.
@@ -359,8 +342,7 @@ mod postgresql_migrations {
     pub fn run_migrations() -> Result<(), super::Error> {
         use diesel::{Connection, RunQueryDsl};
         // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
-        let connection =
-            diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
+        let connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
         // Disable Foreign Key Checks during migration

         // FIXME: Per https://www.postgresql.org/docs/12/sql-set-constraints.html,
@@ -1,3 +1,5 @@
+use std::io::ErrorKind;
+
 use serde_json::Value;

 use super::Cipher;
@@ -12,7 +14,7 @@ db_object! {
     pub struct Attachment {
         pub id: String,
         pub cipher_uuid: String,
-        pub file_name: String,
+        pub file_name: String, // encrypted
         pub file_size: i32,
         pub akey: Option<String>,
     }
@@ -20,13 +22,13 @@ db_object! {

 /// Local methods
 impl Attachment {
-    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i32) -> Self {
+    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i32, akey: Option<String>) -> Self {
         Self {
             id,
             cipher_uuid,
             file_name,
             file_size,
-            akey: None,
+            akey,
         }
     }

@@ -34,18 +36,17 @@ impl Attachment {
         format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
     }

+    pub fn get_url(&self, host: &str) -> String {
+        format!("{}/attachments/{}/{}", host, self.cipher_uuid, self.id)
+    }
+
     pub fn to_json(&self, host: &str) -> Value {
-        use crate::util::get_display_size;
-
-        let web_path = format!("{}/attachments/{}/{}", host, self.cipher_uuid, self.id);
-        let display_size = get_display_size(self.file_size);
-
         json!({
             "Id": self.id,
-            "Url": web_path,
+            "Url": self.get_url(host),
             "FileName": self.file_name,
             "Size": self.file_size.to_string(),
-            "SizeName": display_size,
+            "SizeName": crate::util::get_display_size(self.file_size),
             "Key": self.akey,
             "Object": "attachment"
         })
@@ -59,7 +60,6 @@ use crate::error::MapResult;

 /// Database methods
 impl Attachment {
-
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
         db_run! { conn:
             sqlite, mysql {
@@ -92,7 +92,7 @@ impl Attachment {
         }
     }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
         db_run! { conn: {
             crate::util::retry(
                 || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
@@ -100,14 +100,25 @@ impl Attachment {
             )
             .map_res("Error deleting attachment")?;

-            crate::util::delete_file(&self.get_file_path())?;
-            Ok(())
+            let file_path = &self.get_file_path();
+
+            match crate::util::delete_file(file_path) {
+                // Ignore "file not found" errors. This can happen when the
+                // upstream caller has already cleaned up the file as part of
+                // its own error handling.
+                Err(e) if e.kind() == ErrorKind::NotFound => {
+                    debug!("File '{}' already deleted.", file_path);
+                    Ok(())
+                }
+                Err(e) => Err(e.into()),
+                _ => Ok(()),
+            }
         }}
     }

     pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for attachment in Attachment::find_by_cipher(&cipher_uuid, &conn) {
-            attachment.delete(&conn)?;
+        for attachment in Attachment::find_by_cipher(cipher_uuid, conn) {
+            attachment.delete(conn)?;
         }
         Ok(())
     }
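`Attachment::delete` now tolerates a missing file, which matters because a caller may delete the database row while cleaning up after a failed upload that already removed the file on disk. The core of the pattern with `std` only:

use std::fs;
use std::io::ErrorKind;

fn delete_file_idempotent(path: &str) -> std::io::Result<()> {
    match fs::remove_file(path) {
        // Already gone: an earlier cleanup path may have removed it first,
        // so "not found" is treated as success rather than an error.
        Err(e) if e.kind() == ErrorKind::NotFound => Ok(()),
        other => other,
    }
}

fn main() {
    // Deleting a file that does not exist is now a no-op instead of a failure.
    assert!(delete_file_idempotent("does-not-exist.bin").is_ok());
}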
@@ -1,15 +1,10 @@
-use chrono::{NaiveDateTime, Utc};
+use chrono::{Duration, NaiveDateTime, Utc};
 use serde_json::Value;

 use crate::CONFIG;

 use super::{
-    Attachment,
-    CollectionCipher,
-    Favorite,
-    FolderCipher,
-    Organization,
-    User,
-    UserOrgStatus,
-    UserOrgType,
+    Attachment, CollectionCipher, Favorite, FolderCipher, Organization, User, UserOrgStatus, UserOrgType,
     UserOrganization,
 };
@@ -43,9 +38,16 @@ db_object! {

         pub password_history: Option<String>,
         pub deleted_at: Option<NaiveDateTime>,
+        pub reprompt: Option<i32>,
     }
 }

+#[allow(dead_code)]
+pub enum RepromptType {
+    None = 0,
+    Password = 1, // not currently used in server
+}
+
 /// Local methods
 impl Cipher {
     pub fn new(atype: i32, name: String) -> Self {
@@ -68,6 +70,7 @@ impl Cipher {
             data: String::new(),
             password_history: None,
             deleted_at: None,
+            reprompt: None,
         }
     }
 }
@@ -91,16 +94,16 @@ impl Cipher {
         };

         let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
-        let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
+        let password_history_json =
+            self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

-        let (read_only, hide_passwords) =
-            match self.get_access_restrictions(&user_uuid, conn) {
-                Some((ro, hp)) => (ro, hp),
-                None => {
-                    error!("Cipher ownership assertion failure");
-                    (true, true)
-                },
-            };
+        let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, conn) {
+            Some((ro, hp)) => (ro, hp),
+            None => {
+                error!("Cipher ownership assertion failure");
+                (true, true)
+            }
+        };

         // Get the type_data or a default to an empty json object '{}'.
         // If not passing an empty object, mobile clients will crash.
@@ -130,7 +133,7 @@ impl Cipher {

         // There are three types of cipher response models in upstream
         // Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
-        // of increasing level of detail). bitwarden_rs currently only
+        // of increasing level of detail). vaultwarden currently only
         // supports the "cipherDetails" type, though it seems like the
         // Bitwarden clients will ignore extra fields.
         //
@@ -141,8 +144,9 @@ impl Cipher {
             "Type": self.atype,
             "RevisionDate": format_date(&self.updated_at),
             "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
-            "FolderId": self.get_folder_uuid(&user_uuid, conn),
-            "Favorite": self.is_favorite(&user_uuid, conn),
+            "FolderId": self.get_folder_uuid(user_uuid, conn),
+            "Favorite": self.is_favorite(user_uuid, conn),
+            "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
             "OrganizationId": self.organization_uuid,
             "Attachments": attachments_json,
             // We have UseTotp set to true by default within the Organization model.
@@ -189,18 +193,16 @@ impl Cipher {
         let mut user_uuids = Vec::new();
         match self.user_uuid {
             Some(ref user_uuid) => {
-                User::update_uuid_revision(&user_uuid, conn);
+                User::update_uuid_revision(user_uuid, conn);
                 user_uuids.push(user_uuid.clone())
             }
             None => {
                 // Belongs to Organization, need to update affected users
                 if let Some(ref org_uuid) = self.organization_uuid {
-                    UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn)
-                        .iter()
-                        .for_each(|user_org| {
-                            User::update_uuid_revision(&user_org.user_uuid, conn);
-                            user_uuids.push(user_org.user_uuid.clone())
-                        });
+                    UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).iter().for_each(|user_org| {
+                        User::update_uuid_revision(&user_org.user_uuid, conn);
+                        user_uuids.push(user_org.user_uuid.clone())
+                    });
                 }
             }
         };
@@ -258,23 +260,34 @@ impl Cipher {
     }

     pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for cipher in Self::find_by_org(org_uuid, &conn) {
-            cipher.delete(&conn)?;
+        for cipher in Self::find_by_org(org_uuid, conn) {
+            cipher.delete(conn)?;
         }
         Ok(())
     }

     pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for cipher in Self::find_owned_by_user(user_uuid, &conn) {
-            cipher.delete(&conn)?;
+        for cipher in Self::find_owned_by_user(user_uuid, conn) {
+            cipher.delete(conn)?;
         }
         Ok(())
     }

+    /// Purge all ciphers that are old enough to be auto-deleted.
+    pub fn purge_trash(conn: &DbConn) {
+        if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
+            let now = Utc::now().naive_utc();
+            let dt = now - Duration::days(auto_delete_days);
+            for cipher in Self::find_deleted_before(&dt, conn) {
+                cipher.delete(conn).ok();
+            }
+        }
+    }
+
     pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(user_uuid, conn);

-        match (self.get_folder_uuid(&user_uuid, conn), folder_uuid) {
+        match (self.get_folder_uuid(user_uuid, conn), folder_uuid) {
             // No changes
             (None, None) => Ok(()),
             (Some(ref old), Some(ref new)) if old == new => Ok(()),
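`purge_trash` is the model-side half of the new trash-purge job: it computes a cutoff of `now - TRASH_AUTO_DELETE_DAYS` and permanently deletes anything soft-deleted before that point. The cutoff arithmetic in isolation:

use chrono::{Duration, NaiveDateTime, Utc};

fn is_purgeable(deleted_at: NaiveDateTime, auto_delete_days: i64) -> bool {
    let cutoff = Utc::now().naive_utc() - Duration::days(auto_delete_days);
    deleted_at < cutoff
}

fn main() {
    let now = Utc::now().naive_utc();
    assert!(is_purgeable(now - Duration::days(40), 30));  // 40 days in trash: purge
    assert!(!is_purgeable(now - Duration::days(10), 30)); // 10 days: keep
}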
@@ -306,7 +319,7 @@ impl Cipher {
     /// Returns whether this cipher is owned by an org in which the user has full access.
     pub fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool {
         if let Some(ref org_uuid) = self.organization_uuid {
-            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user_uuid, &org_uuid, conn) {
+            if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
                 return user_org.has_full_access();
             }
         }
@@ -323,7 +336,7 @@ impl Cipher {
         // Check whether this cipher is directly owned by the user, or is in
         // a collection that the user has full access to. If so, there are no
         // access restrictions.
-        if self.is_owned_by_user(&user_uuid) || self.is_in_full_access_org(&user_uuid, &conn) {
+        if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, conn) {
             return Some((false, false));
         }

@@ -364,14 +377,14 @@ impl Cipher {
     }

     pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        match self.get_access_restrictions(&user_uuid, &conn) {
+        match self.get_access_restrictions(user_uuid, conn) {
             Some((read_only, _hide_passwords)) => !read_only,
             None => false,
         }
     }

     pub fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        self.get_access_restrictions(&user_uuid, &conn).is_some()
+        self.get_access_restrictions(user_uuid, conn).is_some()
     }

     // Returns whether this cipher is a favorite of the specified user.
@@ -511,6 +524,15 @@ impl Cipher {
         }}
     }

+    /// Find all ciphers that were deleted before the specified datetime.
+    pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
+        db_run! {conn: {
+            ciphers::table
+                .filter(ciphers::deleted_at.lt(dt))
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
+    }
+
     pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
         db_run! {conn: {
             ciphers_collections::table
@@ -1,6 +1,6 @@
use serde_json::Value;

use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization, User, Cipher};
use super::{Cipher, Organization, User, UserOrgStatus, UserOrgType, UserOrganization};

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]

@@ -109,8 +109,8 @@ impl Collection {

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
        CollectionCipher::delete_all_by_collection(&self.uuid, &conn)?;
        CollectionUser::delete_all_by_collection(&self.uuid, &conn)?;
        CollectionCipher::delete_all_by_collection(&self.uuid, conn)?;
        CollectionUser::delete_all_by_collection(&self.uuid, conn)?;

        db_run! { conn: {
            diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))

@@ -120,18 +120,16 @@ impl Collection {
    }

    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
        for collection in Self::find_by_organization(org_uuid, &conn) {
            collection.delete(&conn)?;
        for collection in Self::find_by_organization(org_uuid, conn) {
            collection.delete(conn)?;
        }
        Ok(())
    }

    pub fn update_users_revision(&self, conn: &DbConn) {
        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn)
            .iter()
            .for_each(|user_org| {
                User::update_uuid_revision(&user_org.user_uuid, conn);
            });
        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).iter().for_each(|user_org| {
            User::update_uuid_revision(&user_org.user_uuid, conn);
        });
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {

@@ -170,10 +168,7 @@ impl Collection {
    }

    pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        Self::find_by_user_uuid(user_uuid, conn)
            .into_iter()
            .filter(|c| c.org_uuid == org_uuid)
            .collect()
        Self::find_by_user_uuid(user_uuid, conn).into_iter().filter(|c| c.org_uuid == org_uuid).collect()
    }

    pub fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {

@@ -225,7 +220,7 @@ impl Collection {
    }

    pub fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(&user_uuid, &self.org_uuid, &conn) {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) {
            None => false, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {

@@ -247,7 +242,7 @@ impl Collection {
    }

    pub fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(&user_uuid, &self.org_uuid, &conn) {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) {
            None => true, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {

@@ -284,8 +279,14 @@ impl CollectionUser {
        }}
    }

    pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&user_uuid, conn);
    pub fn save(
        user_uuid: &str,
        collection_uuid: &str,
        read_only: bool,
        hide_passwords: bool,
        conn: &DbConn,
    ) -> EmptyResult {
        User::update_uuid_revision(user_uuid, conn);

        db_run! { conn:
            sqlite, mysql {

@@ -374,11 +375,9 @@ impl CollectionUser {
    }

    pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        CollectionUser::find_by_collection(&collection_uuid, conn)
            .iter()
            .for_each(|collection| {
                User::update_uuid_revision(&collection.user_uuid, conn);
            });
        CollectionUser::find_by_collection(collection_uuid, conn).iter().for_each(|collection| {
            User::update_uuid_revision(&collection.user_uuid, conn);
        });

        db_run! { conn: {
            diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))

@@ -407,7 +406,7 @@ impl CollectionUser {
/// Database methods
impl CollectionCipher {
    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        Self::update_users_revision(&collection_uuid, conn);
        Self::update_users_revision(collection_uuid, conn);

        db_run! { conn:
            sqlite, mysql {

@@ -437,7 +436,7 @@ impl CollectionCipher {
    }

    pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        Self::update_users_revision(&collection_uuid, conn);
        Self::update_users_revision(collection_uuid, conn);

        db_run! { conn: {
            diesel::delete(

@@ -143,8 +143,8 @@ impl Device {
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for device in Self::find_by_user(user_uuid, &conn) {
            device.delete(&conn)?;
        for device in Self::find_by_user(user_uuid, conn) {
            device.delete(conn)?;
        }
        Ok(())
    }

@@ -20,7 +20,7 @@ use crate::error::MapResult;
impl Favorite {
    // Returns whether the specified cipher is a favorite of the specified user.
    pub fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool {
        db_run!{ conn: {
        db_run! { conn: {
            let query = favorites::table
                .filter(favorites::cipher_uuid.eq(cipher_uuid))
                .filter(favorites::user_uuid.eq(user_uuid))

@@ -32,23 +32,23 @@ impl Favorite {

    // Sets whether the specified cipher is a favorite of the specified user.
    pub fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult {
        let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, &conn), favorite);
        let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn), favorite);
        match (old, new) {
            (false, true) => {
                User::update_uuid_revision(user_uuid, &conn);
                db_run!{ conn: {
                    diesel::insert_into(favorites::table)
                        .values((
                            favorites::user_uuid.eq(user_uuid),
                            favorites::cipher_uuid.eq(cipher_uuid),
                        ))
                        .execute(conn)
                        .map_res("Error adding favorite")
                }}
                User::update_uuid_revision(user_uuid, conn);
                db_run! { conn: {
                    diesel::insert_into(favorites::table)
                        .values((
                            favorites::user_uuid.eq(user_uuid),
                            favorites::cipher_uuid.eq(cipher_uuid),
                        ))
                        .execute(conn)
                        .map_res("Error adding favorite")
                }}
            }
            (true, false) => {
                User::update_uuid_revision(user_uuid, &conn);
                db_run!{ conn: {
                User::update_uuid_revision(user_uuid, conn);
                db_run! { conn: {
                    diesel::delete(
                        favorites::table
                            .filter(favorites::user_uuid.eq(user_uuid))

@@ -59,7 +59,7 @@ impl Favorite {
                }}
            }
            // Otherwise, the favorite status is already what it should be.
            _ => Ok(())
            _ => Ok(()),
        }
    }
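
The rewritten set_favorite drives the insert/delete off a (current, desired) pair, so only real state transitions touch the database. The same pattern in isolation, as a runnable sketch (the Transition enum is illustrative, not part of the codebase):

/// What has to happen to move a stored flag from `old` to `new`.
#[derive(Debug, PartialEq)]
enum Transition {
    Insert,
    Delete,
    Nothing,
}

fn transition(old: bool, new: bool) -> Transition {
    match (old, new) {
        (false, true) => Transition::Insert,
        (true, false) => Transition::Delete,
        // Already in the desired state; nothing to persist.
        _ => Transition::Nothing,
    }
}

fn main() {
    assert_eq!(transition(false, true), Transition::Insert);
    assert_eq!(transition(true, true), Transition::Nothing);
}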

@@ -107,8 +107,7 @@ impl Folder {

    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.user_uuid, conn);
        FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;

        FolderCipher::delete_all_by_folder(&self.uuid, conn)?;

        db_run! { conn: {
            diesel::delete(folders::table.filter(folders::uuid.eq(&self.uuid)))

@@ -118,8 +117,8 @@ impl Folder {
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for folder in Self::find_by_user(user_uuid, &conn) {
            folder.delete(&conn)?;
        for folder in Self::find_by_user(user_uuid, conn) {
            folder.delete(conn)?;
        }
        Ok(())
    }

@@ -6,9 +6,9 @@ mod favorite;
mod folder;
mod org_policy;
mod organization;
mod send;
mod two_factor;
mod user;
mod send;

pub use self::attachment::Attachment;
pub use self::cipher::Cipher;

@@ -18,6 +18,6 @@ pub use self::favorite::Favorite;
pub use self::folder::{Folder, FolderCipher};
pub use self::org_policy::{OrgPolicy, OrgPolicyType};
pub use self::organization::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
pub use self::send::{Send, SendType};
pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::user::{Invitation, User, UserStampException};
pub use self::send::{Send, SendType};

@@ -1,10 +1,12 @@
use serde::Deserialize;
use serde_json::Value;

use crate::api::EmptyResult;
use crate::db::DbConn;
use crate::error::MapResult;
use crate::util::UpCase;

use super::{Organization, UserOrganization, UserOrgStatus, UserOrgType};
use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization};

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]

@@ -20,8 +22,7 @@ db_object! {
    }
}

#[derive(Copy, Clone)]
#[derive(num_derive::FromPrimitive)]
#[derive(Copy, Clone, PartialEq, num_derive::FromPrimitive)]
pub enum OrgPolicyType {
    TwoFactorAuthentication = 0,
    MasterPassword = 1,

@@ -30,6 +31,14 @@ pub enum OrgPolicyType {
    // RequireSso = 4, // Not currently supported.
    PersonalOwnership = 5,
    DisableSend = 6,
    SendOptions = 7,
}

// https://github.com/bitwarden/server/blob/master/src/Core/Models/Data/SendOptionsPolicyData.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendOptionsPolicyData {
    pub DisableHideEmail: bool,
}

/// Local methods

@@ -175,7 +184,8 @@ impl OrgPolicy {
    /// and the user is not an owner or admin of that org. This is only useful for checking
    /// applicability of policy types that have these particular semantics.
    pub fn is_applicable_to_user(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool {
        for policy in OrgPolicy::find_by_user(user_uuid, conn) { // Returns confirmed users only.
        // Returns confirmed users only.
        for policy in OrgPolicy::find_by_user(user_uuid, conn) {
            if policy.enabled && policy.has_type(policy_type) {
                let org_uuid = &policy.org_uuid;
                if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {

@@ -188,6 +198,30 @@ impl OrgPolicy {
        false
    }

    /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
    /// option of the `Send Options` policy, and the user is not an owner or admin of that org.
    pub fn is_hide_email_disabled(user_uuid: &str, conn: &DbConn) -> bool {
        // Returns confirmed users only.
        for policy in OrgPolicy::find_by_user(user_uuid, conn) {
            if policy.enabled && policy.has_type(OrgPolicyType::SendOptions) {
                let org_uuid = &policy.org_uuid;
                if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
                    if user.atype < UserOrgType::Admin {
                        match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
                            Ok(opts) => {
                                if opts.data.DisableHideEmail {
                                    return true;
                                }
                            }
                            _ => error!("Failed to deserialize policy data: {}", policy.data),
                        }
                    }
                }
            }
        }
        false
    }
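
is_hide_email_disabled relies on the project's UpCase wrapper to accept the PascalCase keys the upstream server emits. A standalone sketch of the same idea without that wrapper, using serde's alias attribute (the snake_case field name is illustrative):

use serde::Deserialize;

// Stand-in for SendOptionsPolicyData above: serde's `alias` lets the
// PascalCase key used by the upstream server deserialize into an
// idiomatic snake_case field.
#[derive(Deserialize)]
struct SendOptionsPolicyData {
    #[serde(alias = "DisableHideEmail")]
    disable_hide_email: bool,
}

fn main() -> Result<(), serde_json::Error> {
    let raw = r#"{ "DisableHideEmail": true }"#;
    let opts: SendOptionsPolicyData = serde_json::from_str(raw)?;
    assert!(opts.disable_hide_email);
    Ok(())
}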

    /*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))

@@ -1,8 +1,8 @@
use num_traits::FromPrimitive;
use serde_json::Value;
use std::cmp::Ordering;
use num_traits::FromPrimitive;

use super::{CollectionUser, User, OrgPolicy};
use super::{CollectionUser, OrgPolicy, OrgPolicyType, User};

db_object! {
    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]

@@ -12,6 +12,8 @@ db_object! {
    pub uuid: String,
    pub name: String,
    pub billing_email: String,
    pub private_key: Option<String>,
    pub public_key: Option<String>,
}

#[derive(Identifiable, Queryable, Insertable, AsChangeset)]

@@ -35,8 +37,7 @@ pub enum UserOrgStatus {
    Confirmed = 2,
}

#[derive(Copy, Clone, PartialEq, Eq)]
#[derive(num_derive::FromPrimitive)]
#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)]
pub enum UserOrgType {
    Owner = 0,
    Admin = 1,

@@ -123,12 +124,13 @@ impl PartialOrd<UserOrgType> for i32 {

/// Local methods
impl Organization {
    pub fn new(name: String, billing_email: String) -> Self {
    pub fn new(name: String, billing_email: String, private_key: Option<String>, public_key: Option<String>) -> Self {
        Self {
            uuid: crate::util::get_uuid(),

            name,
            billing_email,
            private_key,
            public_key,
        }
    }
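
The new private_key/public_key columns feed the HasPublicAndPrivateKeys flag seen in the hunks below. A small sketch of that derivation with serde_json (the Org struct is a stand-in for the model):

use serde_json::{json, Value};

struct Org {
    private_key: Option<String>,
    public_key: Option<String>,
}

fn keys_json(org: &Org) -> Value {
    json!({
        // True only when both halves of the keypair are stored.
        "HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
    })
}

fn main() {
    let org = Org { private_key: Some("pem".into()), public_key: None };
    println!("{}", keys_json(&org));
}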

@@ -141,14 +143,16 @@ impl Organization {
            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
            "Use2fa": true,
            "UseDirectory": false,
            "UseEvents": false,
            "UseGroups": false,
            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
            "UseEvents": false, // not supported by us
            "UseGroups": false, // not supported by us
            "UseTotp": true,
            "UsePolicies": true,
            "UseSso": false, // We do not support SSO
            "SelfHost": true,
            "UseApi": false, // not supported by us
            "HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
            "ResetPasswordEnrolled": false, // not supported by us

            "BusinessName": null,
            "BusinessAddress1": null,

@@ -190,11 +194,9 @@ use crate::error::MapResult;
/// Database methods
impl Organization {
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        UserOrganization::find_by_org(&self.uuid, conn)
            .iter()
            .for_each(|user_org| {
                User::update_uuid_revision(&user_org.user_uuid, conn);
            });
        UserOrganization::find_by_org(&self.uuid, conn).iter().for_each(|user_org| {
            User::update_uuid_revision(&user_org.user_uuid, conn);
        });

        db_run! { conn:
            sqlite, mysql {

@@ -231,11 +233,10 @@ impl Organization {
    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        use super::{Cipher, Collection};

        Cipher::delete_all_by_organization(&self.uuid, &conn)?;
        Collection::delete_all_by_organization(&self.uuid, &conn)?;
        UserOrganization::delete_all_by_organization(&self.uuid, &conn)?;
        OrgPolicy::delete_all_by_organization(&self.uuid, &conn)?;

        Cipher::delete_all_by_organization(&self.uuid, conn)?;
        Collection::delete_all_by_organization(&self.uuid, conn)?;
        UserOrganization::delete_all_by_organization(&self.uuid, conn)?;
        OrgPolicy::delete_all_by_organization(&self.uuid, conn)?;

        db_run! { conn: {
            diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))

@@ -273,13 +274,15 @@ impl UserOrganization {
            "UsersGetPremium": true,

            "Use2fa": true,
            "UseDirectory": false,
            "UseEvents": false,
            "UseGroups": false,
            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
            "UseEvents": false, // not supported by us
            "UseGroups": false, // not supported by us
            "UseTotp": true,
            "UsePolicies": true,
            "UseApi": false, // not supported by us
            "SelfHost": true,
            "HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
            "ResetPasswordEnrolled": false, // not supported by us
            "SsoBound": false, // We do not support SSO
            "UseSso": false, // We do not support SSO
            // TODO: Add support for Business Portal

@@ -297,10 +300,12 @@ impl UserOrganization {
            // "AccessReports": false,
            // "ManageAllCollections": false,
            // "ManageAssignedCollections": false,
            // "ManageCiphers": false,
            // "ManageGroups": false,
            // "ManagePolicies": false,
            // "ManageResetPassword": false,
            // "ManageSso": false,
            // "ManageUsers": false
            // "ManageUsers": false,
            // },

            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side

@@ -347,11 +352,13 @@ impl UserOrganization {
            let collections = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn);
            collections
                .iter()
                .map(|c| json!({
                    "Id": c.collection_uuid,
                    "ReadOnly": c.read_only,
                    "HidePasswords": c.hide_passwords,
                }))
                .map(|c| {
                    json!({
                        "Id": c.collection_uuid,
                        "ReadOnly": c.read_only,
                        "HidePasswords": c.hide_passwords,
                    })
                })
                .collect()
        };

@@ -404,7 +411,7 @@ impl UserOrganization {
    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.user_uuid, conn);

        CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, &conn)?;
        CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn)?;

        db_run! { conn: {
            diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid)))

@@ -414,22 +421,22 @@ impl UserOrganization {
    }

    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
        for user_org in Self::find_by_org(&org_uuid, &conn) {
            user_org.delete(&conn)?;
        for user_org in Self::find_by_org(org_uuid, conn) {
            user_org.delete(conn)?;
        }
        Ok(())
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for user_org in Self::find_any_state_by_user(&user_uuid, &conn) {
            user_org.delete(&conn)?;
        for user_org in Self::find_any_state_by_user(user_uuid, conn) {
            user_org.delete(conn)?;
        }
        Ok(())
    }

    pub fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option<UserOrganization> {
        if let Some(user) = super::User::find_by_mail(email, conn) {
            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, &conn) {
            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn) {
                return Some(user_org);
            }
        }

@@ -446,8 +453,7 @@ impl UserOrganization {
    }

    pub fn has_full_access(&self) -> bool {
        (self.access_all || self.atype >= UserOrgType::Admin) &&
            self.has_status(UserOrgStatus::Confirmed)
        (self.access_all || self.atype >= UserOrgType::Admin) && self.has_status(UserOrgStatus::Confirmed)
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {

@@ -538,6 +544,25 @@ impl UserOrganization {
        }}
    }

    pub fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .inner_join(
                    org_policies::table.on(
                        org_policies::org_uuid.eq(users_organizations::org_uuid)
                            .and(users_organizations::user_uuid.eq(user_uuid))
                            .and(org_policies::atype.eq(policy_type as i32))
                            .and(org_policies::enabled.eq(true)))
                )
                .filter(
                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
                )
                .select(users_organizations::all_columns)
                .load::<UserOrganizationDb>(conn)
                .unwrap_or_default().from_db()
        }}
    }

    pub fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table

@@ -36,6 +36,7 @@ db_object! {
        pub deletion_date: NaiveDateTime,

        pub disabled: bool,
        pub hide_email: Option<bool>,
    }
}

@@ -73,6 +74,7 @@ impl Send {
            deletion_date,

            disabled: false,
            hide_email: None,
        }
    }

@@ -101,6 +103,22 @@ impl Send {
        }
    }

    pub fn creator_identifier(&self, conn: &DbConn) -> Option<String> {
        if let Some(hide_email) = self.hide_email {
            if hide_email {
                return None;
            }
        }

        if let Some(user_uuid) = &self.user_uuid {
            if let Some(user) = User::find_by_uuid(user_uuid, conn) {
                return Some(user.email);
            }
        }

        None
    }

    pub fn to_json(&self) -> Value {
        use crate::util::format_date;
        use data_encoding::BASE64URL_NOPAD;

@@ -123,6 +141,7 @@ impl Send {
            "AccessCount": self.access_count,
            "Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
            "Disabled": self.disabled,
            "HideEmail": self.hide_email,

            "RevisionDate": format_date(&self.revision_date),
            "ExpirationDate": self.expiration_date.as_ref().map(format_date),

@@ -131,7 +150,7 @@ impl Send {
        })
    }

    pub fn to_json_access(&self) -> Value {
    pub fn to_json_access(&self, conn: &DbConn) -> Value {
        use crate::util::format_date;

        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();

@@ -145,6 +164,7 @@ impl Send {
            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },

            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
            "CreatorIdentifier": self.creator_identifier(conn),
            "Object": "send-access",
        })
    }

@@ -205,10 +225,17 @@ impl Send {
        }}
    }

    /// Purge all sends that are past their deletion date.
    pub fn purge(conn: &DbConn) {
        for send in Self::find_by_past_deletion_date(conn) {
            send.delete(conn).ok();
        }
    }

    pub fn update_users_revision(&self, conn: &DbConn) {
        match &self.user_uuid {
            Some(user_uuid) => {
                User::update_uuid_revision(&user_uuid, conn);
                User::update_uuid_revision(user_uuid, conn);
            }
            None => {
                // Belongs to Organization, not implemented

@@ -217,18 +244,12 @@ impl Send {
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for send in Self::find_by_user(user_uuid, &conn) {
            send.delete(&conn)?;
        for send in Self::find_by_user(user_uuid, conn) {
            send.delete(conn)?;
        }
        Ok(())
    }

    pub fn find_all(conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            sends::table.load::<SendDb>(conn).expect("Error loading sends").from_db()
        }}
    }

    pub fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option<Self> {
        use data_encoding::BASE64URL_NOPAD;
        use uuid::Uuid;

@@ -271,4 +292,13 @@ impl Send {
            .load::<SendDb>(conn).expect("Error loading sends").from_db()
        }}
    }

    pub fn find_by_past_deletion_date(conn: &DbConn) -> Vec<Self> {
        let now = Utc::now().naive_utc();
        db_run! {conn: {
            sends::table
                .filter(sends::deletion_date.lt(now))
                .load::<SendDb>(conn).expect("Error loading sends").from_db()
        }}
    }
}
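
find_by_access_id pulls in BASE64URL_NOPAD and Uuid, which suggests the access id is a base64url-encoded UUID. A sketch of that decode under that assumption (the round-trip test needs the uuid crate's v4 feature):

use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid;

/// Decode a send access id (base64url, no padding) back into the UUID it
/// encodes; errors are collapsed into Option, as in the model code.
fn uuid_from_access_id(access_id: &str) -> Option<Uuid> {
    let bytes = BASE64URL_NOPAD.decode(access_id.as_bytes()).ok()?;
    Uuid::from_slice(&bytes).ok()
}

fn main() {
    let id = Uuid::new_v4();
    let access_id = BASE64URL_NOPAD.encode(id.as_bytes());
    assert_eq!(uuid_from_access_id(&access_id), Some(id));
}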

@@ -31,11 +31,14 @@ pub enum TwoFactorType {
    U2f = 4,
    Remember = 5,
    OrganizationDuo = 6,
    Webauthn = 7,

    // These are implementation details
    U2fRegisterChallenge = 1000,
    U2fLoginChallenge = 1001,
    EmailVerificationChallenge = 1002,
    WebauthnRegisterChallenge = 1003,
    WebauthnLoginChallenge = 1004,
}
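
The num_derive::FromPrimitive derive on enums like this one is what turns the stored integer back into a variant. A cut-down, runnable illustration (only two variants kept):

use num_traits::FromPrimitive;

// Mirrors the enum above: the derive generates the i32 -> variant mapping
// used when a numeric type arrives from the database or a client request.
#[derive(Debug, PartialEq, num_derive::FromPrimitive)]
enum TwoFactorType {
    Authenticator = 0,
    Webauthn = 7,
}

fn main() {
    assert_eq!(TwoFactorType::from_i32(7), Some(TwoFactorType::Webauthn));
    assert_eq!(TwoFactorType::from_i32(42), None);
}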

/// Local methods

@@ -146,4 +149,73 @@ impl TwoFactor {
            .map_res("Error deleting twofactors")
        }}
    }

    pub fn migrate_u2f_to_webauthn(conn: &DbConn) -> EmptyResult {
        let u2f_factors = db_run! { conn: {
            twofactor::table
                .filter(twofactor::atype.eq(TwoFactorType::U2f as i32))
                .load::<TwoFactorDb>(conn)
                .expect("Error loading twofactor")
                .from_db()
        }};

        use crate::api::core::two_factor::u2f::U2FRegistration;
        use crate::api::core::two_factor::webauthn::{get_webauthn_registrations, WebauthnRegistration};
        use std::convert::TryInto;
        use webauthn_rs::proto::*;

        for mut u2f in u2f_factors {
            let mut regs: Vec<U2FRegistration> = serde_json::from_str(&u2f.data)?;
            // Skip if there are no registrations, or if they are already migrated
            // (the migration runs in batch, so once the first one is marked
            // migrated we can consider them all migrated).
            if regs.is_empty() || regs[0].migrated == Some(true) {
                continue;
            }

            let (_, mut webauthn_regs) = get_webauthn_registrations(&u2f.user_uuid, conn)?;

            // If the user already has webauthn registrations saved, don't overwrite them
            if !webauthn_regs.is_empty() {
                continue;
            }

            for reg in &mut regs {
                let x: [u8; 32] = reg.reg.pub_key[1..33].try_into().unwrap();
                let y: [u8; 32] = reg.reg.pub_key[33..65].try_into().unwrap();

                let key = COSEKey {
                    type_: COSEAlgorithm::ES256,
                    key: COSEKeyType::EC_EC2(COSEEC2Key {
                        curve: ECDSACurve::SECP256R1,
                        x,
                        y,
                    }),
                };

                let new_reg = WebauthnRegistration {
                    id: reg.id,
                    migrated: true,
                    name: reg.name.clone(),
                    credential: Credential {
                        counter: reg.counter,
                        verified: false,
                        cred: key,
                        cred_id: reg.reg.key_handle.clone(),
                        registration_policy: UserVerificationPolicy::Discouraged,
                    },
                };

                webauthn_regs.push(new_reg);

                reg.migrated = Some(true);
            }

            u2f.data = serde_json::to_string(&regs)?;
            u2f.save(conn)?;

            TwoFactor::new(u2f.user_uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&webauthn_regs)?)
                .save(conn)?;
        }

        Ok(())
    }
}
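
The migration slices each U2F registration's 65-byte uncompressed P-256 public key (0x04 || X || Y) into the two 32-byte coordinates a WebAuthn credential stores separately. The same slicing as a standalone sketch, with the length/prefix guard that the migration's unwrap() leaves implicit:

use std::convert::TryInto;

/// Split a 65-byte uncompressed P-256 public key into its 32-byte coordinates.
fn split_uncompressed_p256(pub_key: &[u8]) -> Option<([u8; 32], [u8; 32])> {
    // Uncompressed points start with 0x04; anything else is not handled here.
    if pub_key.len() != 65 || pub_key[0] != 0x04 {
        return None;
    }
    let x: [u8; 32] = pub_key[1..33].try_into().ok()?;
    let y: [u8; 32] = pub_key[33..65].try_into().ok()?;
    Some((x, y))
}

fn main() {
    let mut key = vec![0x04u8];
    key.extend_from_slice(&[0xAA; 32]); // X coordinate (dummy)
    key.extend_from_slice(&[0xBB; 32]); // Y coordinate (dummy)
    assert!(split_uncompressed_p256(&key).is_some());
}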

@@ -1,4 +1,4 @@
use chrono::{NaiveDateTime, Utc};
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;

use crate::crypto;

@@ -63,8 +63,9 @@ enum UserStatus {

#[derive(Serialize, Deserialize)]
pub struct UserStampException {
    pub route: String,
    pub security_stamp: String
    pub routes: Vec<String>,
    pub security_stamp: String,
    pub expire: i64,
}

/// Local methods

@@ -135,9 +136,11 @@ impl User {
    /// # Arguments
    ///
    /// * `password` - A str which contains a hashed version of the user's master password.
    /// * `allow_next_route` - An Option<&str> with the function name of the next allowed (rocket) route.
    /// * `allow_next_route` - An Option<Vec<String>> with the function names of the next allowed (rocket) routes.
    ///   These routes are able to use the previous stamp id for the next 2 minutes.
    ///   After these 2 minutes this stamp will expire.
    ///
    pub fn set_password(&mut self, password: &str, allow_next_route: Option<&str>) {
    pub fn set_password(&mut self, password: &str, allow_next_route: Option<Vec<String>>) {
        self.password_hash = crypto::hash_password(password.as_bytes(), &self.salt, self.password_iterations as u32);

        if let Some(route) = allow_next_route {

@@ -154,24 +157,20 @@ impl User {
    /// Set the stamp_exception to only allow a subsequent request matching a specific route using the current security-stamp.
    ///
    /// # Arguments
    /// * `route_exception` - A str with the function name of the next allowed (rocket) route.
    /// * `route_exception` - A Vec<String> with the function names of the next allowed (rocket) routes.
    ///   These routes are able to use the previous stamp id for the next 2 minutes.
    ///   After these 2 minutes this stamp will expire.
    ///
    /// ### Future
    /// In the future it could be possible that we need more of these exception routes.
    /// In that case we could use a Vec<UserStampException> and add multiple exceptions.
    pub fn set_stamp_exception(&mut self, route_exception: &str) {
    pub fn set_stamp_exception(&mut self, route_exception: Vec<String>) {
        let stamp_exception = UserStampException {
            route: route_exception.to_string(),
            security_stamp: self.security_stamp.to_string()
            routes: route_exception,
            security_stamp: self.security_stamp.to_string(),
            expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
        };
        self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
    }
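
The exception now carries several routes and a hard two-minute expiry instead of a single route that never lapsed. A self-contained sketch of building and serializing such a value (struct and route names are illustrative stand-ins for the model's UserStampException):

use chrono::{Duration, Utc};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct StampException {
    routes: Vec<String>,
    security_stamp: String,
    expire: i64,
}

fn main() {
    let exception = StampException {
        routes: vec!["post_keys".into(), "put_profile".into()],
        security_stamp: "old-stamp".into(),
        // Unix timestamp two minutes from now, after which the exception lapses.
        expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
    };
    println!("{}", serde_json::to_string(&exception).unwrap_or_default());
}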

    /// Resets the stamp_exception to prevent re-use of the previous security-stamp
    ///
    /// ### Future
    /// In the future it could be possible that we need more of these exception routes.
    /// In that case we could use a Vec<UserStampException> and add multiple exceptions.
    pub fn reset_stamp_exception(&mut self) {
        self.stamp_exception = None;
    }

@@ -187,7 +186,7 @@ use crate::error::MapResult;
impl User {
    pub fn to_json(&self, conn: &DbConn) -> Value {
        let orgs = UserOrganization::find_by_user(&self.uuid, conn);
        let orgs_json: Vec<Value> = orgs.iter().map(|c| c.to_json(&conn)).collect();
        let orgs_json: Vec<Value> = orgs.iter().map(|c| c.to_json(conn)).collect();
        let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();

        // TODO: Might want to save the status field in the DB

@@ -341,14 +340,16 @@ impl User {
    pub fn last_active(&self, conn: &DbConn) -> Option<NaiveDateTime> {
        match Device::find_latest_active_by_user(&self.uuid, conn) {
            Some(device) => Some(device.updated_at),
            None => None
            None => None,
        }
    }
}

impl Invitation {
    pub const fn new(email: String) -> Self {
        Self { email }
        Self {
            email,
        }
    }

    pub fn save(&self, conn: &DbConn) -> EmptyResult {

@@ -396,8 +397,8 @@ impl Invitation {
    }

    pub fn take(mail: &str, conn: &DbConn) -> bool {
        match Self::find_by_mail(mail, &conn) {
            Some(invitation) => invitation.delete(&conn).is_ok(),
        match Self::find_by_mail(mail, conn) {
            Some(invitation) => invitation.delete(conn).is_ok(),
            None => false,
        }
    }

@@ -22,6 +22,7 @@ table! {
        data -> Text,
        password_history -> Nullable<Text>,
        deleted_at -> Nullable<Datetime>,
        reprompt -> Nullable<Integer>,
    }
}

@@ -99,6 +100,8 @@ table! {
        uuid -> Text,
        name -> Text,
        billing_email -> Text,
        private_key -> Nullable<Text>,
        public_key -> Nullable<Text>,
    }
}

@@ -122,6 +125,7 @@ table! {
        expiration_date -> Nullable<Datetime>,
        deletion_date -> Datetime,
        disabled -> Bool,
        hide_email -> Nullable<Bool>,
    }
}

@@ -22,6 +22,7 @@ table! {
        data -> Text,
        password_history -> Nullable<Text>,
        deleted_at -> Nullable<Timestamp>,
        reprompt -> Nullable<Integer>,
    }
}

@@ -99,6 +100,8 @@ table! {
        uuid -> Text,
        name -> Text,
        billing_email -> Text,
        private_key -> Nullable<Text>,
        public_key -> Nullable<Text>,
    }
}

@@ -122,6 +125,7 @@ table! {
        expiration_date -> Nullable<Timestamp>,
        deletion_date -> Timestamp,
        disabled -> Bool,
        hide_email -> Nullable<Bool>,
    }
}

@@ -22,6 +22,7 @@ table! {
        data -> Text,
        password_history -> Nullable<Text>,
        deleted_at -> Nullable<Timestamp>,
        reprompt -> Nullable<Integer>,
    }
}

@@ -99,6 +100,8 @@ table! {
        uuid -> Text,
        name -> Text,
        billing_email -> Text,
        private_key -> Nullable<Text>,
        public_key -> Nullable<Text>,
    }
}

@@ -122,6 +125,7 @@ table! {
        expiration_date -> Nullable<Timestamp>,
        deletion_date -> Timestamp,
        disabled -> Bool,
        hide_email -> Nullable<Bool>,
    }
}

79
src/error.rs
@@ -33,26 +33,25 @@ macro_rules! make_error {
    };
}

use diesel::r2d2::PoolError as R2d2Err;
use diesel::result::Error as DieselErr;
use diesel::ConnectionError as DieselConErr;
use diesel_migrations::RunMigrationsError as DieselMigErr;
use diesel::r2d2::PoolError as R2d2Err;
use handlebars::RenderError as HbErr;
use jsonwebtoken::errors::Error as JwtErr;
use lettre::address::AddressError as AddrErr;
use lettre::error::Error as LettreErr;
use lettre::transport::smtp::Error as SmtpErr;
use openssl::error::ErrorStack as SSLErr;
use regex::Error as RegexErr;
use reqwest::Error as ReqErr;
use serde_json::{Error as SerdeErr, Value};
use std::io::Error as IoErr;

use std::time::SystemTimeError as TimeErr;
use u2f::u2ferror::U2fError as U2fErr;
use webauthn_rs::error::WebauthnError as WebauthnErr;
use yubico::yubicoerror::YubicoError as YubiErr;

use lettre::address::AddressError as AddrErr;
use lettre::error::Error as LettreErr;
use lettre::message::mime::FromStrError as FromStrErr;
use lettre::transport::smtp::Error as SmtpErr;

#[derive(Serialize)]
pub struct Empty {}

@@ -63,31 +62,32 @@ pub struct Empty {}
// The second one contains the function used to obtain the response sent to the client
make_error! {
    // Just an empty error
    EmptyError(Empty): _no_source, _serialize,
    Empty(Empty): _no_source, _serialize,
    // Used to represent err! calls
    SimpleError(String): _no_source, _api_error,
    Simple(String): _no_source, _api_error,
    // Used for special return values, like 2FA errors
    JsonError(Value): _no_source, _serialize,
    DbError(DieselErr): _has_source, _api_error,
    R2d2Error(R2d2Err): _has_source, _api_error,
    U2fError(U2fErr): _has_source, _api_error,
    SerdeError(SerdeErr): _has_source, _api_error,
    JWtError(JwtErr): _has_source, _api_error,
    TemplError(HbErr): _has_source, _api_error,
    Json(Value): _no_source, _serialize,
    Db(DieselErr): _has_source, _api_error,
    R2d2(R2d2Err): _has_source, _api_error,
    U2f(U2fErr): _has_source, _api_error,
    Serde(SerdeErr): _has_source, _api_error,
    JWt(JwtErr): _has_source, _api_error,
    Handlebars(HbErr): _has_source, _api_error,
    //WsError(ws::Error): _has_source, _api_error,
    IoError(IoErr): _has_source, _api_error,
    TimeError(TimeErr): _has_source, _api_error,
    ReqError(ReqErr): _has_source, _api_error,
    RegexError(RegexErr): _has_source, _api_error,
    YubiError(YubiErr): _has_source, _api_error,
    Io(IoErr): _has_source, _api_error,
    Time(TimeErr): _has_source, _api_error,
    Req(ReqErr): _has_source, _api_error,
    Regex(RegexErr): _has_source, _api_error,
    Yubico(YubiErr): _has_source, _api_error,

    LettreError(LettreErr): _has_source, _api_error,
    AddressError(AddrErr): _has_source, _api_error,
    SmtpError(SmtpErr): _has_source, _api_error,
    FromStrError(FromStrErr): _has_source, _api_error,
    Lettre(LettreErr): _has_source, _api_error,
    Address(AddrErr): _has_source, _api_error,
    Smtp(SmtpErr): _has_source, _api_error,
    OpenSSL(SSLErr): _has_source, _api_error,

    DieselConError(DieselConErr): _has_source, _api_error,
    DieselMigError(DieselMigErr): _has_source, _api_error,
    DieselCon(DieselConErr): _has_source, _api_error,
    DieselMig(DieselMigErr): _has_source, _api_error,
    Webauthn(WebauthnErr): _has_source, _api_error,
}

impl std::fmt::Debug for Error {

@@ -95,15 +95,15 @@ impl std::fmt::Debug for Error {
        match self.source() {
            Some(e) => write!(f, "{}.\n[CAUSE] {:#?}", self.message, e),
            None => match self.error {
                ErrorKind::EmptyError(_) => Ok(()),
                ErrorKind::SimpleError(ref s) => {
                ErrorKind::Empty(_) => Ok(()),
                ErrorKind::Simple(ref s) => {
                    if &self.message == s {
                        write!(f, "{}", self.message)
                    } else {
                        write!(f, "{}. {}", self.message, s)
                    }
                }
                ErrorKind::JsonError(_) => write!(f, "{}", self.message),
                ErrorKind::Json(_) => write!(f, "{}", self.message),
                _ => unreachable!(),
            },
        }

@@ -166,7 +166,7 @@ fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String {

fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
    let json = json!({
        "Message": "",
        "Message": msg,
        "error": "",
        "error_description": "",
        "ValidationErrors": {"": [ msg ]},

@@ -174,6 +174,9 @@ fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
            "Message": msg,
            "Object": "error"
        },
        "ExceptionMessage": null,
        "ExceptionStackTrace": null,
        "InnerExceptionMessage": null,
        "Object": "error"
    });
    _serialize(&json, "")

@@ -191,18 +194,14 @@ use rocket::response::{self, Responder, Response};
impl<'r> Responder<'r> for Error {
    fn respond_to(self, _: &Request) -> response::Result<'r> {
        match self.error {
            ErrorKind::EmptyError(_) => {} // Don't print the error in this situation
            ErrorKind::SimpleError(_) => {} // Don't print the error in this situation
            ErrorKind::Empty(_) => {} // Don't print the error in this situation
            ErrorKind::Simple(_) => {} // Don't print the error in this situation
            _ => error!(target: "error", "{:#?}", self),
        };

        let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest);

        Response::build()
            .status(code)
            .header(ContentType::JSON)
            .sized_body(Cursor::new(format!("{}", self)))
            .ok()
        Response::build().status(code).header(ContentType::JSON).sized_body(Cursor::new(format!("{}", self))).ok()
    }
}

@@ -223,11 +222,11 @@ macro_rules! err {

#[macro_export]
macro_rules! err_code {
    ($msg:expr, $err_code: literal) => {{
    ($msg:expr, $err_code: expr) => {{
        error!("{}", $msg);
        return Err(crate::error::Error::new($msg, $msg).with_code($err_code));
    }};
    ($usr_msg:expr, $log_value:expr, $err_code: literal) => {{
    ($usr_msg:expr, $log_value:expr, $err_code: expr) => {{
        error!("{}. {}", $usr_msg, $log_value);
        return Err(crate::error::Error::new($usr_msg, $log_value).with_code($err_code));
    }};
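
Swapping the literal fragment specifier for expr lets err_code! take a computed status code, not just a literal token. A minimal illustration of the difference (with_code! is a toy macro, unrelated to the codebase):

// With `$code:expr`, the second argument can be any expression.
macro_rules! with_code {
    ($msg:expr, $code:expr) => {
        format!("{} (status {})", $msg, $code)
    };
}

fn lookup_code() -> u16 {
    404
}

fn main() {
    let dynamic = lookup_code();
    // Both forms expand; with `$code:literal` only the second call,
    // passing the bare literal 404, would compile.
    println!("{}", with_code!("not found", dynamic));
    println!("{}", with_code!("not found", 404));
}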

47
src/mail.rs
@@ -1,4 +1,4 @@
use std::{str::FromStr};
use std::str::FromStr;

use chrono::{DateTime, Local};
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};

@@ -27,7 +27,7 @@ fn mailer() -> SmtpTransport {
        .timeout(Some(Duration::from_secs(CONFIG.smtp_timeout())));

    // Determine security
    let smtp_client = if CONFIG.smtp_ssl() {
    let smtp_client = if CONFIG.smtp_ssl() || CONFIG.smtp_explicit_tls() {
        let mut tls_parameters = TlsParameters::builder(host);
        if CONFIG.smtp_accept_invalid_hostnames() {
            tls_parameters = tls_parameters.dangerous_accept_invalid_hostnames(true);

@@ -62,11 +62,13 @@ fn mailer() -> SmtpTransport {
        let mut selected_mechanisms = vec![];
        for wanted_mechanism in mechanism.split(',') {
            for m in &allowed_mechanisms {
                if m.to_string().to_lowercase() == wanted_mechanism.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase() {
                if m.to_string().to_lowercase()
                    == wanted_mechanism.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase()
                {
                    selected_mechanisms.push(*m);
                }
            }
        };
        }

        if !selected_mechanisms.is_empty() {
            smtp_client.authentication(selected_mechanisms)
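
The reformatted comparison still performs the same normalization: strip surrounding quotes and spaces from each configured mechanism name, then compare case-insensitively. As an isolated sketch:

/// Normalize one entry from a comma-separated mechanism list (as configured
/// via a setting like SMTP_AUTH_MECHANISM): strip surrounding quotes and
/// spaces, then lowercase for comparison.
fn normalize_mechanism(raw: &str) -> String {
    raw.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase()
}

fn main() {
    for raw in "\"Plain\", 'LOGIN' , Xoauth2".split(',') {
        println!("{:?} -> {:?}", raw, normalize_mechanism(raw));
    }
}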

@@ -97,9 +99,8 @@ fn get_template(template_name: &str, data: &serde_json::Value) -> Result<(String
        None => err!("Template doesn't contain subject"),
    };

    use newline_converter::unix2dos;
    let body = match text_split.next() {
        Some(s) => unix2dos(s.trim()).to_string(),
        Some(s) => s.trim().to_string(),
        None => err!("Template doesn't contain body"),
    };

@@ -179,6 +180,18 @@ pub fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
    send_email(address, &subject, body_html, body_text)
}

pub fn send_2fa_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/send_2fa_removed_from_org",
        json!({
            "url": CONFIG.domain(),
            "org_name": org_name,
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
}

pub fn send_invite(
    address: &str,
    uuid: &str,

@@ -305,42 +318,34 @@ fn send_email(address: &str, subject: &str, body_html: String, body_text: String
    let html = SinglePart::builder()
        // We force Base64 encoding because in the past we had issues with different encodings.
        .header(header::ContentTransferEncoding::Base64)
        .header(header::ContentType("text/html; charset=utf-8".parse()?))
        .header(header::ContentType::TEXT_HTML)
        .body(body_html);

    let text = SinglePart::builder()
        // We force Base64 encoding because in the past we had issues with different encodings.
        .header(header::ContentTransferEncoding::Base64)
        .header(header::ContentType("text/plain; charset=utf-8".parse()?))
        .header(header::ContentType::TEXT_PLAIN)
        .body(body_text);

    let smtp_from = &CONFIG.smtp_from();
    let email = Message::builder()
        .message_id(Some(format!("<{}@{}>", crate::util::get_uuid(), smtp_from.split('@').collect::<Vec<&str>>()[1] )))
        .message_id(Some(format!("<{}@{}>", crate::util::get_uuid(), smtp_from.split('@').collect::<Vec<&str>>()[1])))
        .to(Mailbox::new(None, Address::from_str(&address)?))
        .from(Mailbox::new(
            Some(CONFIG.smtp_from_name()),
            Address::from_str(smtp_from)?,
        ))
        .from(Mailbox::new(Some(CONFIG.smtp_from_name()), Address::from_str(smtp_from)?))
        .subject(subject)
        .multipart(
            MultiPart::alternative()
                .singlepart(text)
                .singlepart(html)
        )?;
        .multipart(MultiPart::alternative().singlepart(text).singlepart(html))?;

    match mailer().send(&email) {
        Ok(_) => Ok(()),
        // Match some common errors and make them more user friendly
        Err(e) => {
            if e.is_client() {
                err!(format!("SMTP Client error: {}", e));
            } else if e.is_transient() {
                err!(format!("SMTP 4xx error: {:?}", e));
            } else if e.is_permanent() {
                err!(format!("SMTP 5xx error: {:?}", e));
            } else if e.is_timeout() {
                err!(format!("SMTP timeout error: {:?}", e));
            } else {
                Err(e.into())

159
src/main.rs
@@ -16,14 +16,8 @@ extern crate diesel;
#[macro_use]
extern crate diesel_migrations;

use std::{
    fs::create_dir_all,
    panic,
    path::Path,
    process::{exit, Command},
    str::FromStr,
    thread,
};
use job_scheduler::{Job, JobScheduler};
use std::{fs::create_dir_all, panic, path::Path, process::exit, str::FromStr, thread, time::Duration};

#[macro_use]
mod error;

@@ -51,19 +45,26 @@ fn main() {
    let extra_debug = matches!(level, LF::Trace | LF::Debug);

    check_data_folder();
    check_rsa_keys();
    check_rsa_keys().unwrap_or_else(|_| {
        error!("Error creating keys, exiting...");
        exit(1);
    });
    check_web_vault();

    create_icon_cache_folder();

    launch_rocket(extra_debug);
    let pool = create_db_pool();
    schedule_jobs(pool.clone());
    crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().unwrap()).unwrap();

    launch_rocket(pool, extra_debug); // Blocks until program termination.
}

const HELP: &str = "\
A Bitwarden API server written in Rust
Alternative implementation of the Bitwarden server API written in Rust

USAGE:
    bitwarden_rs
    vaultwarden

FLAGS:
    -h, --help       Prints help information

@@ -75,18 +76,18 @@ fn parse_args() {
    let mut pargs = pico_args::Arguments::from_env();

    if pargs.contains(["-h", "--help"]) {
        println!("bitwarden_rs {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
        println!("vaultwarden {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
        print!("{}", HELP);
        exit(0);
    } else if pargs.contains(["-v", "--version"]) {
        println!("bitwarden_rs {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
        println!("vaultwarden {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
        exit(0);
    }
}

fn launch_info() {
    println!("/--------------------------------------------------------------------\\");
    println!("|                       Starting Bitwarden_RS                        |");
    println!("|                        Starting Vaultwarden                        |");

    if let Some(version) = option_env!("BWRS_VERSION") {
        println!("|{:^68}|", format!("Version {}", version));

@@ -96,9 +97,9 @@ fn launch_info() {
    println!("| This is an *unofficial* Bitwarden implementation, DO NOT use the   |");
    println!("| official channels to report bugs/features, regardless of client.   |");
    println!("| Send usage/configuration questions or feature requests to:         |");
    println!("|   https://bitwardenrs.discourse.group/                             |");
    println!("|   https://vaultwarden.discourse.group/                             |");
    println!("| Report suspected bugs/issues in the software itself at:            |");
    println!("|   https://github.com/dani-garcia/bitwarden_rs/issues/new           |");
    println!("|   https://github.com/dani-garcia/vaultwarden/issues/new            |");
    println!("\\--------------------------------------------------------------------/\n");
}

@@ -118,12 +119,17 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        // Never show html5ever and hyper::proto logs, too noisy
        .level_for("html5ever", log::LevelFilter::Off)
        .level_for("hyper::proto", log::LevelFilter::Off)
        .level_for("hyper::client", log::LevelFilter::Off)
        // Prevent cookie_store logs
        .level_for("cookie_store", log::LevelFilter::Off)
        .chain(std::io::stdout());

    // Enable smtp debug logging only specifically for smtp when needed.
    // This can contain sensitive information we do not want in the default debug/trace logging.
    if CONFIG.smtp_debug() {
        println!("[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!");
        println!(
            "[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!"
        );
        println!("[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n");
        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
    } else {

@@ -201,7 +207,7 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
    let syslog_fmt = syslog::Formatter3164 {
        facility: syslog::Facility::LOG_USER,
        hostname: None,
        process: "bitwarden_rs".into(),
        process: "vaultwarden".into(),
        pid: 0,
    };

@@ -238,52 +244,29 @@ fn check_data_folder() {
    }
}

fn check_rsa_keys() {
fn check_rsa_keys() -> Result<(), crate::error::Error> {
    // If the RSA keys don't exist, try to create them
    if !util::file_exists(&CONFIG.private_rsa_key()) || !util::file_exists(&CONFIG.public_rsa_key()) {
        info!("JWT keys don't exist, checking if OpenSSL is available...");
    let priv_path = CONFIG.private_rsa_key();
    let pub_path = CONFIG.public_rsa_key();

        Command::new("openssl").arg("version").status().unwrap_or_else(|_| {
            info!(
                "Can't create keys because OpenSSL is not available, make sure it's installed and available on the PATH"
            );
            exit(1);
        });
    if !util::file_exists(&priv_path) {
        let rsa_key = openssl::rsa::Rsa::generate(2048)?;

        info!("OpenSSL detected, creating keys...");

        let key = CONFIG.rsa_key_filename();

        let pem = format!("{}.pem", key);
        let priv_der = format!("{}.der", key);
        let pub_der = format!("{}.pub.der", key);

        let mut success = Command::new("openssl")
            .args(&["genrsa", "-out", &pem])
            .status()
            .expect("Failed to create private pem file")
            .success();

        success &= Command::new("openssl")
            .args(&["rsa", "-in", &pem, "-outform", "DER", "-out", &priv_der])
            .status()
            .expect("Failed to create private der file")
            .success();

        success &= Command::new("openssl")
            .args(&["rsa", "-in", &priv_der, "-inform", "DER"])
            .args(&["-RSAPublicKey_out", "-outform", "DER", "-out", &pub_der])
            .status()
            .expect("Failed to create public der file")
            .success();

        if success {
            info!("Keys created correctly.");
        } else {
            error!("Error creating keys, exiting...");
            exit(1);
        }
        let priv_key = rsa_key.private_key_to_pem()?;
        crate::util::write_file(&priv_path, &priv_key)?;
        info!("Private key created correctly.");
    }

    if !util::file_exists(&pub_path) {
        let rsa_key = openssl::rsa::Rsa::private_key_from_pem(&util::read_file(&priv_path)?)?;

        let pub_key = rsa_key.public_key_to_pem()?;
        crate::util::write_file(&pub_path, &pub_key)?;
        info!("Public key created correctly.");
    }

    auth::load_keys();
    Ok(())
}
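
The rewritten check_rsa_keys generates the keypair in-process with the openssl crate instead of shelling out to the openssl binary. The core calls, reduced to a runnable sketch (file names are placeholders):

use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Generate a 2048-bit RSA key, as the new code does.
    let rsa = openssl::rsa::Rsa::generate(2048)?;

    // Private key as PEM, then the public half derived from the same key.
    fs::write("rsa_key.pem", rsa.private_key_to_pem()?)?;
    fs::write("rsa_key.pub.pem", rsa.public_key_to_pem()?)?;

    Ok(())
}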

fn check_web_vault() {

@@ -294,24 +277,27 @@ fn check_web_vault() {
    let index_path = Path::new(&CONFIG.web_vault_folder()).join("index.html");

    if !index_path.exists() {
        error!("Web vault is not found at '{}'. To install it, please follow the steps in: ", CONFIG.web_vault_folder());
        error!("https://github.com/dani-garcia/bitwarden_rs/wiki/Building-binary#install-the-web-vault");
        error!(
            "Web vault is not found at '{}'. To install it, please follow the steps in: ",
            CONFIG.web_vault_folder()
        );
        error!("https://github.com/dani-garcia/vaultwarden/wiki/Building-binary#install-the-web-vault");
        error!("You can also set the environment variable 'WEB_VAULT_ENABLED=false' to disable it");
        exit(1);
    }
}

fn launch_rocket(extra_debug: bool) {
    let pool = match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
fn create_db_pool() -> db::DbPool {
    match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
        Ok(p) => p,
        Err(e) => {
            error!("Error creating database pool: {:?}", e);
            exit(1);
        }
    };

    api::start_send_deletion_scheduler(pool.clone());
    }
}

fn launch_rocket(pool: db::DbPool, extra_debug: bool) {
    let basepath = &CONFIG.domain_path();

    // If adding more paths here, consider also adding them to

@@ -334,3 +320,40 @@ fn launch_rocket(extra_debug: bool) {
    // The launch will restore the original logging level
    error!("Launch error {:#?}", result);
}

fn schedule_jobs(pool: db::DbPool) {
    if CONFIG.job_poll_interval_ms() == 0 {
        info!("Job scheduler disabled.");
        return;
    }
    thread::Builder::new()
        .name("job-scheduler".to_string())
        .spawn(move || {
            let mut sched = JobScheduler::new();

            // Purge sends that are past their deletion date.
            if !CONFIG.send_purge_schedule().is_empty() {
                sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || {
                    api::purge_sends(pool.clone());
                }));
            }

            // Purge trashed items that are old enough to be auto-deleted.
            if !CONFIG.trash_purge_schedule().is_empty() {
                sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || {
                    api::purge_trashed_ciphers(pool.clone());
                }));
            }

            // Periodically check for jobs to run. We probably won't need any
            // jobs that run more often than once a minute, so a default poll
            // interval of 30 seconds should be sufficient. Users who want to
            // schedule jobs to run more frequently for some reason can reduce
            // the poll interval accordingly.
            loop {
                sched.tick();
                thread::sleep(Duration::from_millis(CONFIG.job_poll_interval_ms()));
            }
        })
        .expect("Error spawning job scheduler thread");
}

@@ -198,7 +198,9 @@
        "amazon.in",
        "amazon.it",
        "amazon.nl",
        "amazon.pl",
        "amazon.sa",
        "amazon.se",
        "amazon.sg"
      ],
      "Excluded": false

@@ -772,7 +774,8 @@
        "stackoverflow.com",
        "serverfault.com",
        "mathoverflow.net",
        "askubuntu.com"
        "askubuntu.com",
        "stackapps.com"
      ],
      "Excluded": false
    },

BIN src/static/images/fallback-icon.png (new file, 331 B)
BIN (image resized: 9.2 KiB before, 6.9 KiB after)
BIN (image resized: 5.3 KiB before, 2.5 KiB after)
BIN (image removed, 1.8 KiB before)
BIN src/static/images/vaultwarden-icon.png (new file, 945 B)