Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 02:35:58 +03:00)

Compare commits (72 commits)
Commits in this range (short SHA1s):

5d84f17600, 0db4b00007, a0198d8d7c, dfad931dca, 25865efd79, bcf627930e,
ce70cd2cf4, 2ac589d4b4, b2e2aef7de, 0755bb19c0, fee0c1c711, f58539f0b4,
e718afb441, 55945ad793, 4fd22d8e3b, d6a8fb8e48, 3b48e6e903, 6b9333b33e,
a545636ee5, f125d5f1a1, ad75ce281e, 9059437c35, c84db0daca, 72adc239f5,
34ebeeca76, 0469d9ba4c, eaa6ad06ed, 0d3f283c37, 51a1d641c5, 90f7e5ff80,
200999c94e, d363e647e9, 53f58b14d5, ef7835d1b0, 3a44dc963b, a039e227c7,
602b18fdd6, bf04c64759, 2f1d86b7f1, ff97bcfdda, 73f2441d1a, ad8484a2d5,
9813e480c0, bfe172702a, df42b6d6b0, 2697fe8aba, 674e444d67, 0d16da440d,
66cf179bca, 025bb90f8f, d5039d9c17, e7c796a660, bbbd2f6d15, a2d7895586,
8a0cb1137e, f960bf59bb, 3a1f1bae00, 8dfe805954, 07b869b3ef, 2a18665288,
71952a4ab5, 994d157064, 1dae6093c9, 6edceb5f7a, 359a4a088a, 3baffeee9a,
d5c353427d, 1f868b8d22, 8d1df08b81, 3b6bccde97, d2b36642a6, a02fb0fd24
@@ -15,6 +15,14 @@
####################

## Main data folder
+ ## This can be a path to local folder or a path to an external location
+ ## depending on features enabled at build time. Possible external locations:
+ ##
+ ## - AWS S3 Bucket (via `s3` feature): s3://bucket-name/path/to/folder
+ ##
+ ## When using an external location, make sure to set TMP_FOLDER,
+ ## TEMPLATES_FOLDER, and DATABASE_URL to local paths and/or a remote database
+ ## location.
# DATA_FOLDER=data

## Individual folders, these override %DATA_FOLDER%
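For illustration, a minimal sketch of the external-storage case described above, assuming a build with the `s3` feature; the bucket name and local paths are placeholders:

```sh
DATA_FOLDER=s3://my-vaultwarden-bucket/vaultwarden
# These must stay local (or point at a remote database) when DATA_FOLDER is external:
TMP_FOLDER=/var/lib/vaultwarden/tmp
TEMPLATES_FOLDER=/var/lib/vaultwarden/templates
DATABASE_URL=/var/lib/vaultwarden/db.sqlite3
```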
@@ -22,10 +30,13 @@
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends

## Temporary folder used for storing temporary file uploads
+ ## Must be a local path.
# TMP_FOLDER=data/tmp

- ## Templates data folder, by default uses embedded templates
- ## Check source code to see the format
+ ## HTML template overrides data folder
+ ## Must be a local path.
# TEMPLATES_FOLDER=data/templates
## Automatically reload the templates for every request, slow, use only for development
# RELOAD_TEMPLATES=false
@@ -39,7 +50,9 @@
#########################

## Database URL
- ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+ ## When using SQLite, this is the path to the DB file, and it defaults to
+ ## %DATA_FOLDER%/db.sqlite3. If DATA_FOLDER is set to an external location, this
+ ## must be set to a local sqlite3 file path.
# DATABASE_URL=data/db.sqlite3
## When using MySQL, specify an appropriate connection URI.
## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
@@ -117,7 +130,7 @@
## and are always in terms of UTC time (regardless of your local time zone settings).
##
## The schedule format is a bit different from crontab as crontab does not contains seconds.
- ## You can test the the format here: https://crontab.guru, but remove the first digit!
+ ## You can test the format here: https://crontab.guru, but remove the first digit!
## SEC   MIN   HOUR   DAY OF MONTH   MONTH   DAY OF WEEK
## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
## "0 30 * * * * "
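As a worked example of the "remove the first digit" advice: the six-field schedule "0 30 * * * *" above corresponds to the five-field crontab expression "30 * * * *" (every hour at minute 30) once the leading seconds field is dropped, and that five-field form is what https://crontab.guru can validate.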
@@ -229,7 +242,8 @@
# SIGNUPS_ALLOWED=true

## Controls if new users need to verify their email address upon registration
- ## Note that setting this option to true prevents logins until the email address has been verified!
+ ## On new client versions, this will require the user to verify their email at signup time.
+ ## On older clients, it will require the user to verify their email before they can log in.
## The welcome email will include a verification link, and login attempts will periodically
## trigger another verification email to be sent.
# SIGNUPS_VERIFY=false
@@ -259,7 +273,7 @@
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com

- ## Invitations org admins to invite users, even when signups are disabled
+ ## Allows org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Vaultwarden
@@ -327,32 +341,33 @@

## Icon download timeout
## Configure the timeout value when downloading the favicons.
- ## The default is 10 seconds, but this could be to low on slower network connections
+ ## The default is 10 seconds, but this could be too low on slower network connections
# ICON_DOWNLOAD_TIMEOUT=10

## Block HTTP domains/IPs by Regex
## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
## Useful to hide other servers in the local network. Check the WIKI for more details
- ## NOTE: Always enclose this regex withing single quotes!
+ ## NOTE: Always enclose this regex within single quotes!
# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'

- ## Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
+ ## Enabling this will cause the internal HTTP client to refuse to connect to any non-global IP address.
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true

## Client Settings
## Enable experimental feature flags for clients.
## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
## Note that clients cache the /api/config endpoint for about 1 hour and it could take some time before they are enabled or disabled!
##
## The following flags are available:
## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
## - "autofill-v2": Use the new autofill implementation.
## - "browser-fileless-import": Directly import credentials from other providers without a file.
## - "extension-refresh": Temporarily enable the new extension design until general availability (should be used with the beta Chrome extension)
## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension.
- ## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
## - "inline-menu-totp": Enable the use of inline menu TOTP codes in the browser extension.
## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0)
+ ## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
+ ## - "export-attachments": Enable support for exporting attachments (Clients >=2025.4.0)
+ ## - "anon-addy-self-host-alias": Enable configuring self-hosted Anon Addy alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
+ ## - "simple-login-self-host-alias": Enable configuring self-hosted Simple Login alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
+ ## - "mutual-tls": Enable the use of mutual TLS on Android (Client >= 2025.2.0)
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials

## Require new device emails. When a user logs in an email is required to be sent.
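For illustration (flag names taken from the list above), several flags can be enabled at once as a comma-separated value:

```sh
EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials,ssh-key-vault-item,ssh-agent
```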
@@ -486,7 +501,7 @@
## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3
##
- ## Setup email 2FA regardless of any organization policy
+ ## Setup email 2FA on registration regardless of any organization policy
# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
## Automatically setup email 2FA as fallback provider when needed
# EMAIL_2FA_AUTO_FALLBACK=false
.github/CODEOWNERS (vendored, 3 changes)

@@ -1,3 +1,6 @@
/.github @dani-garcia @BlackDex
/.github/** @dani-garcia @BlackDex
/.github/CODEOWNERS @dani-garcia @BlackDex
/.github/ISSUE_TEMPLATE/** @dani-garcia @BlackDex
/.github/workflows/** @dani-garcia @BlackDex
/SECURITY.md @dani-garcia @BlackDex
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 25 changes)

@@ -8,15 +8,30 @@ body:
value: |
Thanks for taking the time to fill out this bug report!

- Please *do not* submit feature requests or ask for help on how to configure Vaultwarden here.
+ Please **do not** submit feature requests or ask for help on how to configure Vaultwarden here!

The [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions/) has sections for Questions and Ideas.

Our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki/) has topics on how to configure Vaultwarden.

- Also, make sure you are running [](https://github.com/dani-garcia/vaultwarden/releases/latest) of Vaultwarden!
- And search for existing open or closed issues or discussions regarding your topic before posting.

Be sure to check and validate the Vaultwarden Admin Diagnostics (`/admin/diagnostics`) page for any errors!
See here [how to enable the admin page](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page).

+ > [!IMPORTANT]
+ > ## :bangbang: Search for existing **Closed _AND_ Open** [Issues](https://github.com/dani-garcia/vaultwarden/issues?q=is%3Aissue%20) **_AND_** [Discussions](https://github.com/dani-garcia/vaultwarden/discussions?discussions_q=) regarding your topic before posting! :bangbang:
#
+ - type: checkboxes
+ id: checklist
+ attributes:
+ label: Prerequisites
+ description: Please confirm you have completed the following before submitting an issue!
+ options:
+ - label: I have searched the existing **Closed _AND_ Open** [Issues](https://github.com/dani-garcia/vaultwarden/issues?q=is%3Aissue%20) **_AND_** [Discussions](https://github.com/dani-garcia/vaultwarden/discussions?discussions_q=)
+ required: true
+ - label: I have searched and read the [documentation](https://github.com/dani-garcia/vaultwarden/wiki/)
+ required: true
#
- id: support-string
type: textarea

@@ -36,7 +51,7 @@ body:
attributes:
label: Vaultwarden Build Version
description: What version of Vaultwarden are you running?
- placeholder: ex. v1.31.0 or v1.32.0-3466a804
+ placeholder: ex. v1.34.0 or v1.34.1-53f58b14
validations:
required: true
#

@@ -67,7 +82,7 @@ body:
attributes:
label: Reverse Proxy
description: Are you using a reverse proxy, if so which and what version?
- placeholder: ex. nginx 1.26.2, caddy 2.8.4, traefik 3.1.2, haproxy 3.0
+ placeholder: ex. nginx 1.29.0, caddy 2.10.0, traefik 3.4.4, haproxy 3.2
validations:
required: true
#

@@ -115,7 +130,7 @@ body:
attributes:
label: Client Version
description: What version(s) of the client(s) are you seeing the problem on?
- placeholder: ex. CLI v2024.7.2, Firefox 130 - v2024.7.0
+ placeholder: ex. CLI v2025.7.0, Firefox 140 - v2025.6.1
#
- id: reproduce
type: textarea
.github/workflows/build.yml (vendored, 86 changes)

@@ -1,4 +1,5 @@
name: Build
+ permissions: {}

on:
push:

@@ -13,6 +14,7 @@ on:
- "diesel.toml"
- "docker/Dockerfile.j2"
- "docker/DockerSettings.yaml"

pull_request:
paths:
- ".github/workflows/build.yml"

@@ -28,13 +30,17 @@ on:

jobs:
build:
+ name: Build and Test ${{ matrix.channel }}
+ permissions:
+ actions: write
+ contents: read
# We use Ubuntu 22.04 here because this matches the library versions used within the Debian docker containers
runs-on: ubuntu-22.04
timeout-minutes: 120
# Make warnings errors, this is to prevent warnings slipping through.
# This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
env:
- RUSTFLAGS: "-D warnings"
+ RUSTFLAGS: "-Dwarnings"
strategy:
fail-fast: false
matrix:

@@ -42,32 +48,33 @@ jobs:
- "rust-toolchain" # The version defined in rust-toolchain
- "msrv" # The supported MSRV

- name: Build and Test ${{ matrix.channel }}

steps:
- # Checkout the repo
- - name: "Checkout"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- # End Checkout the repo

# Install dependencies
- name: "Install dependencies Ubuntu"
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
# End Install dependencies

+ # Checkout the repo
+ - name: "Checkout"
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+ with:
+ persist-credentials: false
+ fetch-depth: 0
+ # End Checkout the repo

# Determine rust-toolchain version
- name: Init Variables
id: toolchain
shell: bash
+ env:
+ CHANNEL: ${{ matrix.channel }}
run: |
- if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
+ if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then
RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
- elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
+ elif [[ "${CHANNEL}" == 'msrv' ]]; then
RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
else
- RUST_TOOLCHAIN="${{ matrix.channel }}"
+ RUST_TOOLCHAIN="${CHANNEL}"
fi
echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
# End Determine rust-toolchain version
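A plausible reading of the recurring `env:` indirection in these workflow changes (and of the quoting of `${GITHUB_STEP_SUMMARY}` further down): a `${{ ... }}` expression is substituted into the script text before the shell ever runs, so a hostile value can inject commands, while a value passed through `env:` only reaches the shell as ordinary data. A sketch of the two patterns:

```sh
# Template-expanded into the script source (the value becomes shell code):
#   run: echo "${{ matrix.channel }}"
# Passed via env: (the value stays data; the shell expands it, quoted):
#   env:
#     CHANNEL: ${{ matrix.channel }}
#   run: echo "${CHANNEL}"
```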
@@ -75,7 +82,7 @@ jobs:

# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
- uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master @ Dec 14, 2024, 5:49 AM GMT+1
+ uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master @ Apr 29, 2025, 9:22 PM GMT+2
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

@@ -85,7 +92,7 @@ jobs:

# Install the any other channel to be used for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
- uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master @ Dec 14, 2024, 5:49 AM GMT+1
+ uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master @ Apr 29, 2025, 9:22 PM GMT+2
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

@@ -93,11 +100,13 @@ jobs:

# Set the current matrix toolchain version as default
- name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+ env:
+ RUST_TOOLCHAIN: ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
run: |
# Remove the rust-toolchain.toml
rm rust-toolchain.toml
# Set the default
- rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
+ rustup default "${RUST_TOOLCHAIN}"

# Show environment
- name: "Show environment"

@@ -108,7 +117,7 @@ jobs:

# Enable Rust Caching
- name: Rust Caching
- uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
+ uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
with:
# Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
# Like changing the build host from Ubuntu 20.04 to 22.04 for example.

@@ -161,7 +170,7 @@ jobs:
id: clippy
if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }}
run: |
- cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+ cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc
# End Run cargo clippy

@@ -178,22 +187,31 @@ jobs:
# This is useful so all test/clippy/fmt actions are done, and they can all be addressed
- name: "Some checks failed"
if: ${{ failure() }}
+ env:
+ TEST_DB_M_L: ${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }}
+ TEST_DB_M: ${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}
+ TEST_DB: ${{ steps.test_sqlite_mysql_postgresql.outcome }}
+ TEST_SQLITE: ${{ steps.test_sqlite.outcome }}
+ TEST_MYSQL: ${{ steps.test_mysql.outcome }}
+ TEST_POSTGRESQL: ${{ steps.test_postgresql.outcome }}
+ CLIPPY: ${{ steps.clippy.outcome }}
+ FMT: ${{ steps.formatting.outcome }}
run: |
- echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY
- echo "|---|------|" >> $GITHUB_STEP_SUMMARY
- echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### :x: Checks Failed!" >> "${GITHUB_STEP_SUMMARY}"
+ echo "" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|Job|Status|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|---|------|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${TEST_DB_M_L}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${TEST_DB_M}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|test (sqlite,mysql,postgresql)|${TEST_DB}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|test (sqlite)|${TEST_SQLITE}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|test (mysql)|${TEST_MYSQL}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|test (postgresql)|${TEST_POSTGRESQL}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${CLIPPY}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "|fmt|${FMT}|" >> "${GITHUB_STEP_SUMMARY}"
+ echo "" >> "${GITHUB_STEP_SUMMARY}"
+ echo "Please check the failed jobs and fix where needed." >> "${GITHUB_STEP_SUMMARY}"
+ echo "" >> "${GITHUB_STEP_SUMMARY}"
exit 1

@@ -202,5 +220,5 @@ jobs:
- name: "All checks passed"
if: ${{ success() }}
run: |
- echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### :tada: Checks Passed!" >> "${GITHUB_STEP_SUMMARY}"
+ echo "" >> "${GITHUB_STEP_SUMMARY}"
.github/workflows/check-templates.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
name: Check templates
permissions: {}

on: [ push, pull_request ]

jobs:
docker-templates:
name: Validate docker templates
permissions:
contents: read
runs-on: ubuntu-24.04
timeout-minutes: 30

steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
# End Checkout the repo

- name: Run make to rebuild templates
working-directory: docker
run: make

- name: Check for unstaged changes
working-directory: docker
run: git diff --exit-code
continue-on-error: false
.github/workflows/hadolint.yml (vendored, 22 changes)

@@ -1,24 +1,20 @@
name: Hadolint
+ permissions: {}

- on: [
- push,
- pull_request
- ]
+ on: [ push, pull_request ]

jobs:
hadolint:
name: Validate Dockerfile syntax
+ permissions:
+ contents: read
runs-on: ubuntu-24.04
timeout-minutes: 30
- steps:
- # Checkout the repo
- - name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- # End Checkout the repo

+ steps:
# Start Docker Buildx
- name: Setup Docker Buildx
- uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
+ uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:

@@ -37,6 +33,12 @@ jobs:
env:
HADOLINT_VERSION: 2.12.0
# End Download hadolint
+ # Checkout the repo
+ - name: Checkout
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+ with:
+ persist-credentials: false
+ # End Checkout the repo

# Test Dockerfiles with hadolint
- name: Run hadolint
.github/workflows/release.yml (vendored, 112 changes)

@@ -1,4 +1,5 @@
name: Release
+ permissions: {}

on:
push:

@@ -6,17 +7,23 @@ on:
- main

tags:
- - '*'
+ # https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet
+ - '[1-2].[0-9]+.[0-9]+'

jobs:
# https://github.com/marketplace/actions/skip-duplicate-actions
# Some checks to determine if we need to continue with building a new docker.
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
skip_check:
- runs-on: ubuntu-24.04
- # Only run this in the upstream repo and not on forks
- if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
name: Cancel older jobs when running
+ permissions:
+ actions: write
+ runs-on: ubuntu-24.04
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}

steps:
- name: Skip Duplicates Actions
id: skip_check

@@ -27,6 +34,9 @@ jobs:
if: ${{ github.ref_type == 'branch' }}

docker-build:
+ needs: skip_check
+ if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
name: Build Vaultwarden containers
permissions:
packages: write
contents: read

@@ -34,12 +44,10 @@ jobs:
id-token: write
runs-on: ubuntu-24.04
timeout-minutes: 120
- needs: skip_check
- if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
# Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them
services:
registry:
- image: registry:2
+ image: registry@sha256:1fc7de654f2ac1247f0b67e8a459e273b0993be7d2beda1f3f56fbf1001ed3e7 # v3.0.0
ports:
- 5000:5000
env:

@@ -61,37 +69,42 @@ jobs:
base_image: ["debian","alpine"]

steps:
- # Checkout the repo
- - name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- with:
- fetch-depth: 0

- name: Initialize QEMU binfmt support
- uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0
+ uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
with:
platforms: "arm64,arm"

# Start Docker Buildx
- name: Setup Docker Buildx
- uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
+ uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:
cache-binary: false
buildkitd-config-inline: |
[worker.oci]
max-parallelism = 2
driver-opts: |
network=host

+ # Checkout the repo
+ - name: Checkout
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
+ # We need fetch-depth of 0 so we also get all the tag metadata
+ with:
+ persist-credentials: false
+ fetch-depth: 0

# Determine Base Tags and Source Version
- name: Determine Base Tags and Source Version
shell: bash
+ env:
+ REF_TYPE: ${{ github.ref_type }}
run: |
- # Check which main tag we are going to build determined by github.ref_type
- if [[ "${{ github.ref_type }}" == "tag" ]]; then
+ # Check which main tag we are going to build determined by ref_type
+ if [[ "${REF_TYPE}" == "tag" ]]; then
echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
- elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+ elif [[ "${REF_TYPE}" == "branch" ]]; then
echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
fi

@@ -107,7 +120,7 @@ jobs:

# Login to Docker Hub
- name: Login to Docker Hub
- uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -116,12 +129,14 @@ jobs:
- name: Add registry for DockerHub
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
shell: bash
+ env:
+ DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
run: |
- echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+ echo "CONTAINER_REGISTRIES=${DOCKERHUB_REPO}" | tee -a "${GITHUB_ENV}"

# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
- uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}

@@ -131,12 +146,14 @@ jobs:
- name: Add registry for ghcr.io
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
shell: bash
+ env:
+ GHCR_REPO: ${{ vars.GHCR_REPO }}
run: |
- echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+ echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${GHCR_REPO}" | tee -a "${GITHUB_ENV}"

# Login to Quay.io
- name: Login to Quay.io
- uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}

@@ -146,17 +163,22 @@ jobs:
- name: Add registry for Quay.io
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
shell: bash
+ env:
+ QUAY_REPO: ${{ vars.QUAY_REPO }}
run: |
- echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
+ echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}"

- name: Configure build cache from/to
shell: bash
+ env:
+ GHCR_REPO: ${{ vars.GHCR_REPO }}
+ BASE_IMAGE: ${{ matrix.base_image }}
run: |
#
# Check if there is a GitHub Container Registry Login and use it for caching
if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
- echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
- echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
+ echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}" | tee -a "${GITHUB_ENV}"
+ echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
else
echo "BAKE_CACHE_FROM="
echo "BAKE_CACHE_TO="
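The `${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}` construct in the registry steps above is standard POSIX parameter expansion: it expands to the current value plus a trailing comma only when the variable is already set, so the list is comma-joined without a leading comma. A standalone sketch with placeholder repository names:

```sh
add_registry() {
  CONTAINER_REGISTRIES="${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}$1"
}
add_registry "docker.io/example/server"  # -> docker.io/example/server
add_registry "ghcr.io/example/server"    # -> docker.io/example/server,ghcr.io/example/server
echo "${CONTAINER_REGISTRIES}"
```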
@@ -170,7 +192,7 @@ jobs:

- name: Bake ${{ matrix.base_image }} containers
id: bake_vw
- uses: docker/bake-action@5ca506d06f70338a4968df87fd8bfee5cbfb84c7 # v6.0.0
+ uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6.8.0
env:
BASE_TAGS: "${{ env.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"

@@ -189,14 +211,17 @@ jobs:

- name: Extract digest SHA
shell: bash
+ env:
+ BAKE_METADATA: ${{ steps.bake_vw.outputs.metadata }}
+ BASE_IMAGE: ${{ matrix.base_image }}
run: |
- GET_DIGEST_SHA="$(jq -r '.["${{ matrix.base_image }}-multi"]."containerimage.digest"' <<< '${{ steps.bake_vw.outputs.metadata }}')"
+ GET_DIGEST_SHA="$(jq -r --arg base "$BASE_IMAGE" '.[$base + "-multi"]."containerimage.digest"' <<< "${BAKE_METADATA}")"
echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}"
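The jq change follows the same hardening pattern: `--arg base "$BASE_IMAGE"` binds the value as a jq variable instead of splicing `${{ ... }}` text into both the jq program and the here-string. A standalone sketch with made-up metadata:

```sh
BAKE_METADATA='{"alpine-multi":{"containerimage.digest":"sha256:abc123"}}'
BASE_IMAGE=alpine
jq -r --arg base "$BASE_IMAGE" '.[$base + "-multi"]."containerimage.digest"' \
  <<< "${BAKE_METADATA}"  # prints sha256:abc123
```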
# Attest container images
- name: Attest - docker.io - ${{ matrix.base_image }}
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
- uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+ uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
with:
subject-name: ${{ vars.DOCKERHUB_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}

@@ -204,7 +229,7 @@ jobs:

- name: Attest - ghcr.io - ${{ matrix.base_image }}
if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
- uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+ uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
with:
subject-name: ${{ vars.GHCR_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}

@@ -212,7 +237,7 @@ jobs:

- name: Attest - quay.io - ${{ matrix.base_image }}
if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
- uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+ uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
with:
subject-name: ${{ vars.QUAY_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}

@@ -222,16 +247,19 @@ jobs:
# Extract the Alpine binaries from the containers
- name: Extract binaries
shell: bash
+ env:
+ REF_TYPE: ${{ github.ref_type }}
+ BASE_IMAGE: ${{ matrix.base_image }}
run: |
- # Check which main tag we are going to build determined by github.ref_type
- if [[ "${{ github.ref_type }}" == "tag" ]]; then
+ # Check which main tag we are going to build determined by ref_type
+ if [[ "${REF_TYPE}" == "tag" ]]; then
EXTRACT_TAG="latest"
- elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+ elif [[ "${REF_TYPE}" == "branch" ]]; then
EXTRACT_TAG="testing"
fi

# Check which base_image was used and append -alpine if needed
- if [[ "${{ matrix.base_image }}" == "alpine" ]]; then
+ if [[ "${BASE_IMAGE}" == "alpine" ]]; then
EXTRACT_TAG="${EXTRACT_TAG}-alpine"
fi

@@ -240,55 +268,55 @@ jobs:

# Extract amd64 binary
docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
- docker cp amd64:/vaultwarden vaultwarden-amd64-${{ matrix.base_image }}
+ docker cp amd64:/vaultwarden vaultwarden-amd64-${BASE_IMAGE}
docker rm --force amd64
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Extract arm64 binary
docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
- docker cp arm64:/vaultwarden vaultwarden-arm64-${{ matrix.base_image }}
+ docker cp arm64:/vaultwarden vaultwarden-arm64-${BASE_IMAGE}
docker rm --force arm64
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Extract armv7 binary
docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
- docker cp armv7:/vaultwarden vaultwarden-armv7-${{ matrix.base_image }}
+ docker cp armv7:/vaultwarden vaultwarden-armv7-${BASE_IMAGE}
docker rm --force armv7
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Extract armv6 binary
docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
- docker cp armv6:/vaultwarden vaultwarden-armv6-${{ matrix.base_image }}
+ docker cp armv6:/vaultwarden vaultwarden-armv6-${BASE_IMAGE}
docker rm --force armv6
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Upload artifacts to Github Actions and Attest the binaries
- name: "Upload amd64 artifact ${{ matrix.base_image }}"
- uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }}
path: vaultwarden-amd64-${{ matrix.base_image }}

- name: "Upload arm64 artifact ${{ matrix.base_image }}"
- uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }}
path: vaultwarden-arm64-${{ matrix.base_image }}

- name: "Upload armv7 artifact ${{ matrix.base_image }}"
- uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }}
path: vaultwarden-armv7-${{ matrix.base_image }}

- name: "Upload armv6 artifact ${{ matrix.base_image }}"
- uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }}
path: vaultwarden-armv6-${{ matrix.base_image }}

- name: "Attest artifacts ${{ matrix.base_image }}"
- uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+ uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0
with:
subject-path: vaultwarden-*
# End Upload artifacts to Github Actions
.github/workflows/releasecache-cleanup.yml (vendored, 6 changes)

@@ -1,3 +1,6 @@
+ name: Cleanup
+ permissions: {}

on:
workflow_dispatch:
inputs:

@@ -9,10 +12,11 @@ on:
schedule:
- cron: '0 1 * * FRI'

- name: Cleanup
jobs:
releasecache-cleanup:
+ name: Releasecache Cleanup
+ permissions:
+ packages: write
runs-on: ubuntu-24.04
continue-on-error: true
timeout-minutes: 30
.github/workflows/trivy.yml (vendored, 29 changes)

@@ -1,37 +1,42 @@
- name: trivy
+ name: Trivy
+ permissions: {}

on:
push:
branches:
- main

tags:
- '*'

pull_request:
- branches: [ "main" ]
+ branches:
+ - main

schedule:
- cron: '08 11 * * *'

- permissions:
- contents: read

jobs:
trivy-scan:
- # Only run this in the master repo and not on forks
+ # Only run this in the upstream repo and not on forks
# When all forks run this at the same time, it is causing `Too Many Requests` issues
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
- name: Check
- runs-on: ubuntu-24.04
- timeout-minutes: 30
+ name: Trivy Scan
permissions:
contents: read
- security-events: write
actions: read
+ security-events: write
+ runs-on: ubuntu-24.04
+ timeout-minutes: 30

steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false

- name: Run Trivy vulnerability scanner
- uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0 # v0.29.0
+ uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4 # v0.32.0
env:
TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1

@@ -43,6 +48,6 @@ jobs:
severity: CRITICAL,HIGH

- name: Upload Trivy scan results to GitHub Security tab
- uses: github/codeql-action/upload-sarif@86b04fb0e47484f7282357688f21d5d0e32175fe # v3.27.5
+ uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
with:
sarif_file: 'trivy-results.sarif'
.github/workflows/zizmor.yml (vendored, new file, 28 lines)

@@ -0,0 +1,28 @@
name: Security Analysis with zizmor

on:
push:
branches: ["main"]
pull_request:
branches: ["**"]

permissions: {}

jobs:
zizmor:
name: Run zizmor
runs-on: ubuntu-latest
permissions:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- name: Run zizmor
uses: zizmorcore/zizmor-action@f52a838cfabf134edcbaa7c8b3677dde20045018 # v0.1.1
with:
# intentionally not scanning the entire repository,
# since it contains integration tests.
inputs: ./.github/
@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.6.0
+ rev: v5.0.0
hooks:
- id: check-yaml
- id: check-json

@@ -31,7 +31,7 @@ repos:
language: system
args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
types_or: [rust, file]
- files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
+ files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
pass_filenames: false
- id: cargo-clippy
name: cargo clippy

@@ -40,5 +40,13 @@ repos:
language: system
args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
types_or: [rust, file]
- files: (Cargo.toml|Cargo.lock|rust-toolchain|clippy.toml|.*\.rs$)
+ files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
pass_filenames: false
+ - id: check-docker-templates
+ name: check docker templates
+ description: Check if the Docker templates are updated
+ language: system
+ entry: sh
+ args:
+ - "-c"
+ - "cd docker && make"
Cargo.lock (generated, 2588 changes): file diff suppressed because it is too large.
Cargo.toml (100 changes)

@@ -1,11 +1,12 @@
- workspace = { members = ["macros"] }
+ [workspace]
+ members = ["macros"]

[package]
name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2021"
- rust-version = "1.83.0"
+ rust-version = "1.86.0"
resolver = "2"

repository = "https://github.com/dani-garcia/vaultwarden"

@@ -31,6 +32,7 @@ enable_mimalloc = ["dep:mimalloc"]
# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
# if you want to turn off the logging for a specific run.
query_logger = ["dep:diesel_logger"]
+ s3 = ["opendal/services-s3", "dep:aws-config", "dep:aws-credential-types", "dep:aws-smithy-runtime-api", "dep:anyhow", "dep:http", "dep:reqsign"]

# Enable unstable features, requires nightly
# Currently only used to enable rusts official ip support

@@ -44,7 +46,7 @@ syslog = "7.0.0"
macros = { path = "./macros" }

# Logging
- log = "0.4.25"
+ log = "0.4.27"
fern = { version = "0.7.1", features = ["syslog-7", "reopen-1"] }
tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

@@ -52,12 +54,12 @@ tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and
dotenvy = { version = "0.15.7", default-features = false }

# Lazy initialization
- once_cell = "1.20.2"
+ once_cell = "1.21.3"

# Numerical libraries
num-traits = "0.2.19"
num-derive = "0.4.2"
- bigdecimal = "0.4.7"
+ bigdecimal = "0.4.8"

# Web framework
rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }

@@ -71,49 +73,51 @@ dashmap = "6.1.0"

# Async futures
futures = "0.3.31"
- tokio = { version = "1.43.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
+ tokio = { version = "1.47.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio-util = { version = "0.7.15", features = ["compat"]}

# A generic serialization/deserialization framework
- serde = { version = "1.0.217", features = ["derive"] }
- serde_json = "1.0.138"
+ serde = { version = "1.0.219", features = ["derive"] }
+ serde_json = "1.0.141"

# A safe, extensible ORM and Query builder
- diesel = { version = "2.2.7", features = ["chrono", "r2d2", "numeric"] }
+ diesel = { version = "2.2.12", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.2.0"
diesel_logger = { version = "0.4.0", optional = true }

- derive_more = { version = "1.0.0", features = ["from", "into", "as_ref", "deref", "display"] }
+ derive_more = { version = "2.0.1", features = ["from", "into", "as_ref", "deref", "display"] }
diesel-derive-newtype = "2.1.2"

# Bundled/Static SQLite
- libsqlite3-sys = { version = "0.31.0", features = ["bundled"], optional = true }
+ libsqlite3-sys = { version = "0.35.0", features = ["bundled"], optional = true }

# Crypto-related libraries
- rand = "0.9.0"
- ring = "0.17.8"
+ rand = "0.9.2"
+ ring = "0.17.14"
subtle = "2.6.1"

# UUID generation
- uuid = { version = "1.12.1", features = ["v4"] }
+ uuid = { version = "1.17.0", features = ["v4"] }

# Date and time libraries
- chrono = { version = "0.4.39", features = ["clock", "serde"], default-features = false }
- chrono-tz = "0.10.1"
- time = "0.3.37"
+ chrono = { version = "0.4.41", features = ["clock", "serde"], default-features = false }
+ chrono-tz = "0.10.4"
+ time = "0.3.41"

# Job scheduler
- job_scheduler_ng = "2.0.5"
+ job_scheduler_ng = "2.2.0"

# Data encoding library Hex/Base32/Base64
- data-encoding = "2.7.0"
+ data-encoding = "2.9.0"

# JWT library
- jsonwebtoken = "9.3.0"
+ jsonwebtoken = "9.3.1"

# TOTP library
totp-lite = "2.0.1"

# Yubico Library
- yubico = { version = "0.12.0", features = ["online-tokio"], default-features = false }
+ yubico = { package = "yubico_ng", version = "0.13.0", features = ["online-tokio"], default-features = false }

# WebAuthn libraries
webauthn-rs = "0.3.2"

@@ -122,63 +126,72 @@ webauthn-rs = "0.3.2"
url = "2.5.4"

# Email libraries
- lettre = { version = "0.11.11", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+ lettre = { version = "0.11.18", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "hostname", "tracing", "tokio1-rustls", "ring", "rustls-native-certs"], default-features = false }
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
email_address = "0.2.9"

# HTML Template library
- handlebars = { version = "6.3.0", features = ["dir_source"] }
+ handlebars = { version = "6.3.2", features = ["dir_source"] }

# HTTP client (Used for favicons, version check, DUO and HIBP API)
- reqwest = { version = "0.12.12", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
- hickory-resolver = "0.24.2"
+ reqwest = { version = "0.12.22", features = ["rustls-tls", "rustls-tls-native-roots", "stream", "json", "deflate", "gzip", "brotli", "zstd", "socks", "cookies", "charset", "http2", "system-proxy"], default-features = false}
+ hickory-resolver = "0.25.2"

# Favicon extraction libraries
html5gum = "0.7.0"
regex = { version = "1.11.1", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.1"
- bytes = "1.9.0"
+ bytes = "1.10.1"
svg-hush = "0.9.5"

# Cache function results (Used for version check and favicon fetching)
- cached = { version = "0.54.0", features = ["async"] }
+ cached = { version = "0.56.0", features = ["async"] }

# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.1"
cookie_store = "0.21.1"

# Used by U2F, JWT and PostgreSQL
- openssl = "0.10.69"
+ openssl = "0.10.73"

# CLI argument parsing
pico-args = "0.5.0"

# Macro ident concatenation
- paste = "1.0.15"
- governor = "0.8.0"
+ pastey = "0.1.0"
+ governor = "0.10.0"

# Check client versions for specific features.
- semver = "1.0.25"
+ semver = "1.0.26"

# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
- mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
- which = "7.0.1"
+ mimalloc = { version = "0.1.47", features = ["secure"], default-features = false, optional = true }

+ which = "8.0.0"

# Argon2 library with support for the PHC format
argon2 = "0.5.3"

# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
- rpassword = "7.3.1"
+ rpassword = "7.4.0"

# Loading a dynamic CSS Stylesheet
grass_compiler = { version = "0.13.4", default-features = false }

- [patch.crates-io]
- # Patch yubico to remove duplicate crates of older versions
- yubico = { git = "https://github.com/BlackDex/yubico-rs", rev = "00df14811f58155c0f02e3ab10f1570ed3e115c6" }
+ # File are accessed through Apache OpenDAL
+ opendal = { version = "0.54.0", features = ["services-fs"], default-features = false }

+ # For retrieving AWS credentials, including temporary SSO credentials
+ anyhow = { version = "1.0.98", optional = true }
+ aws-config = { version = "1.8.3", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
+ aws-credential-types = { version = "1.2.4", optional = true }
+ aws-smithy-runtime-api = { version = "1.8.5", optional = true }
+ http = { version = "1.3.1", optional = true }
+ reqsign = { version = "0.16.5", optional = true }

# Strip debuginfo from the release builds
- # The symbols are the provide better panic traces
+ # The debug symbols are to provide better panic traces
# Also enable fat LTO and use 1 codegen unit for optimizations
[profile.release]
strip = "debuginfo"

@@ -213,7 +226,7 @@ codegen-units = 16

# Linting config
# https://doc.rust-lang.org/rustc/lints/groups.html
- [lints.rust]
+ [workspace.lints.rust]
# Forbid
unsafe_code = "forbid"
non_ascii_idents = "forbid"

@@ -243,11 +256,14 @@ if_let_rescope = "allow"
tail_expr_drop_order = "allow"

# https://rust-lang.github.io/rust-clippy/stable/index.html
- [lints.clippy]
+ [workspace.lints.clippy]
# Warn
dbg_macro = "warn"
todo = "warn"

+ # Ignore/Allow
+ result_large_err = "allow"

# Deny
case_sensitive_file_extension_comparisons = "deny"
cast_lossless = "deny"

@@ -263,7 +279,6 @@ macro_use_imports = "deny"
manual_assert = "deny"
manual_instant_elapsed = "deny"
manual_string_new = "deny"
- match_on_vec_items = "deny"
match_wildcard_for_single_variants = "deny"
mem_forget = "deny"
needless_continue = "deny"

@@ -278,3 +293,6 @@ unused_async = "deny"
unused_self = "deny"
verbose_file_reads = "deny"
zero_sized_map_values = "deny"

+ [lints]
+ workspace = true
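The lint sections move to the standard Cargo workspace-inheritance pattern: lint levels are defined once under `[workspace.lints.*]` in the root manifest, and each crate (the root package and the `macros` member) opts in with a two-line table. A minimal sketch:

```toml
# Root Cargo.toml
[workspace.lints.rust]
unsafe_code = "forbid"

# In each member manifest, e.g. macros/Cargo.toml
[lints]
workspace = true
```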
README.md (24 changes)

@@ -59,19 +59,21 @@ A nearly complete implementation of the Bitwarden Client API is provided, includ
## Usage

> [!IMPORTANT]
- > Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
- >
- > This can be configured in [Vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
- >
- > If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy or Traefik (see examples linked above).
+ > The web-vault requires the use of a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API).
+ > That means it will only work via `http://localhost:8000` (using the port from the example below) or if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS).

+ The recommended way to install and use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).
+ See [which container image to use](https://github.com/dani-garcia/vaultwarden/wiki/Which-container-image-to-use) for an explanation of the provided tags.

+ There are also [community driven packages](https://github.com/dani-garcia/vaultwarden/wiki/Third-party-packages) which can be used, but those might be lagging behind the latest version or might deviate in the way Vaultwarden is configured, as described in our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).

+ Alternatively, you can also [build Vaultwarden](https://github.com/dani-garcia/vaultwarden/wiki/Building-binary) yourself.

+ While Vaultwarden is based upon the [Rocket web framework](https://rocket.rs), which has built-in support for TLS, our recommendation would be that you set up a reverse proxy (see [proxy examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).

> [!TIP]
> **For more detailed examples on how to install, use and configure Vaultwarden you can check our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).**

- The main way to use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).

- There are also [community driven packages](https://github.com/dani-garcia/vaultwarden/wiki/Third-party-packages) which can be used, but those might be lagging behind the latest version or might deviate in the way Vaultwarden is configured, as described in our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).

### Docker/Podman CLI

Pull the container image and mount a volume from the host for persistent storage.<br>

@@ -83,7 +85,7 @@ docker run --detach --name vaultwarden \
--env DOMAIN="https://vw.domain.tld" \
--volume /vw-data/:/data/ \
--restart unless-stopped \
- --publish 80:80 \
+ --publish 127.0.0.1:8000:80 \
vaultwarden/server:latest
```
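The new port mapping ties back to the secure-context note above: publishing `127.0.0.1:8000:80` makes the vault reachable only from the host itself at `http://localhost:8000`, which browsers treat as a secure context, instead of exposing plain HTTP on port 80 to the whole network.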
@@ -104,7 +106,7 @@ services:
volumes:
- ./vw-data/:/data/
ports:
- - 80:80
+ - 127.0.0.1:8000:80
```

<br>
build.rs

@@ -11,6 +11,8 @@ fn main() {
    println!("cargo:rustc-cfg=postgresql");
    #[cfg(feature = "query_logger")]
    println!("cargo:rustc-cfg=query_logger");
+    #[cfg(feature = "s3")]
+    println!("cargo:rustc-cfg=s3");

    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
    compile_error!(
@@ -23,6 +25,7 @@ fn main() {
    println!("cargo::rustc-check-cfg=cfg(mysql)");
    println!("cargo::rustc-check-cfg=cfg(postgresql)");
    println!("cargo::rustc-check-cfg=cfg(query_logger)");
+    println!("cargo::rustc-check-cfg=cfg(s3)");

    // Rerun when these paths are changed.
    // Someone could have checked-out a tag or specific commit, but no other files changed.
@@ -48,8 +51,8 @@ fn main() {
fn run(args: &[&str]) -> Result<String, std::io::Error> {
    let out = Command::new(args[0]).args(&args[1..]).output()?;
    if !out.status.success() {
-        use std::io::{Error, ErrorKind};
-        return Err(Error::new(ErrorKind::Other, "Command not successful"));
+        use std::io::Error;
+        return Err(Error::other("Command not successful"));
    }
    Ok(String::from_utf8(out.stdout).unwrap().trim().to_string())
}
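The new `s3` flag above follows the crate's existing pattern: build.rs re-emits the Cargo feature as a rustc `--cfg` flag, and the `rustc-check-cfg` declaration keeps `#[cfg(s3)]` from tripping the `unexpected_cfgs` lint. A minimal, self-contained sketch of how such a flag is consumed (module and function names here are illustrative, not Vaultwarden's):

```rust
// Hypothetical consumer of the `s3` cfg emitted by build.rs.
#[cfg(s3)]
mod storage {
    // Compiled only when the crate was built with `--features s3`.
    pub fn backend() -> &'static str {
        "s3"
    }
}

#[cfg(not(s3))]
mod storage {
    // Fallback when the feature is disabled at build time.
    pub fn backend() -> &'static str {
        "local"
    }
}

fn main() {
    // Resolved at compile time; there is no runtime feature probing.
    println!("storage backend: {}", storage::backend());
}
```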
@@ -1,13 +1,13 @@
 ---
-vault_version: "v2025.1.1"
-vault_image_digest: "sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918"
+vault_version: "v2025.7.0"
+vault_image_digest: "sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e"
 # Cross Compile Docker Helper Scripts v1.6.1
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
 # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
 xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
-rust_version: 1.84.1 # Rust version to be used
+rust_version: 1.88.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
-alpine_version: "3.21" # Alpine version to be used
+alpine_version: "3.22" # Alpine version to be used
 # For which platforms/architectures will we try to build images
 platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
 # Determine the build images per OS/Arch
@@ -19,23 +19,23 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2025.1.1
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.1
-#     [docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2025.7.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.7.0
+#     [docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918
-#     [docker.io/vaultwarden/web-vault:v2025.1.1]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e
+#     [docker.io/vaultwarden/web-vault:v2025.7.0]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 AS vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e AS vault

 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.84.1 AS build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.84.1 AS build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.84.1 AS build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.84.1 AS build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.88.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.88.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.88.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.88.0 AS build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
@@ -127,7 +127,7 @@ RUN source /env-cargo && \
 # To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
 #
 # We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
-FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.21
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.22

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -19,15 +19,15 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2025.1.1
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.1
-#     [docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2025.7.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.7.0
+#     [docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918
-#     [docker.io/vaultwarden/web-vault:v2025.1.1]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e
+#     [docker.io/vaultwarden/web-vault:v2025.7.0]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 AS vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f307a19278b15e AS vault

 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
@@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bd

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.84.1-slim-bookworm AS build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.88.0-slim-bookworm AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT
@@ -89,24 +89,24 @@ RUN USER=root cargo new --bin /app
WORKDIR /app

# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
    ARCH_OPENSSL_INCLUDE_DIR
ARG TARGET_PKG_CONFIG_PATH

RUN source /env-cargo && \
    if xx-info is-cross ; then \
        # Some special variables if needed to override some build paths
        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
        fi && \
        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
        # Because of this we generate the needed environment variables here which we can load in the needed steps.
        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
        echo "export CROSS_COMPILE=1" >> /env-cargo && \
        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
        echo "export PKG_CONFIG_ALLOW_CROSS=1" >> /env-cargo && \
        # For some architectures `xx-info` returns a triple which doesn't matches the path on disk
        # In those cases you can override this by setting the `TARGET_PKG_CONFIG_PATH` build-arg
        if [[ -n "${TARGET_PKG_CONFIG_PATH}" ]]; then \
            echo "export TARGET_PKG_CONFIG_PATH=${TARGET_PKG_CONFIG_PATH}" >> /env-cargo ; \
        else \
            echo "export PKG_CONFIG_PATH=/usr/lib/$(xx-info)/pkgconfig" >> /env-cargo ; \
        fi && \
        echo "# End of env-cargo" >> /env-cargo ; \
    fi && \
    # Output the current contents of the file
    cat /env-cargo
@@ -109,24 +109,24 @@ WORKDIR /app

{% if base == "debian" %}
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
    ARCH_OPENSSL_INCLUDE_DIR
ARG TARGET_PKG_CONFIG_PATH

RUN source /env-cargo && \
    if xx-info is-cross ; then \
        # Some special variables if needed to override some build paths
        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
        fi && \
        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
        # Because of this we generate the needed environment variables here which we can load in the needed steps.
        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
        echo "export CROSS_COMPILE=1" >> /env-cargo && \
        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
        echo "export PKG_CONFIG_ALLOW_CROSS=1" >> /env-cargo && \
        # For some architectures `xx-info` returns a triple which doesn't matches the path on disk
        # In those cases you can override this by setting the `TARGET_PKG_CONFIG_PATH` build-arg
        if [[ -n "${TARGET_PKG_CONFIG_PATH}" ]]; then \
            echo "export TARGET_PKG_CONFIG_PATH=${TARGET_PKG_CONFIG_PATH}" >> /env-cargo ; \
        else \
            echo "export PKG_CONFIG_PATH=/usr/lib/$(xx-info)/pkgconfig" >> /env-cargo ; \
        fi && \
        echo "# End of env-cargo" >> /env-cargo ; \
    fi && \
    # Output the current contents of the file
    cat /env-cargo
@@ -133,8 +133,7 @@ target "debian-386" {
  platforms = ["linux/386"]
  tags = generate_tags("", "-386")
  args = {
-    ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
-    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
+    TARGET_PKG_CONFIG_PATH = "/usr/lib/i386-linux-gnu/pkgconfig"
  }
}

@@ -142,20 +141,12 @@ target "debian-ppc64le" {
  inherits = ["debian"]
  platforms = ["linux/ppc64le"]
  tags = generate_tags("", "-ppc64le")
-  args = {
-    ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
-    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
-  }
}

target "debian-s390x" {
  inherits = ["debian"]
  platforms = ["linux/s390x"]
  tags = generate_tags("", "-s390x")
-  args = {
-    ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
-    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
-  }
}
// ==== End of unsupported Debian architecture targets ===
@@ -9,5 +9,8 @@ path = "src/lib.rs"
proc-macro = true

[dependencies]
-quote = "1.0.38"
-syn = "2.0.96"
+quote = "1.0.40"
+syn = "2.0.104"
+
+[lints]
+workspace = true
@@ -1,5 +1,3 @@
-extern crate proc_macro;
-
use proc_macro::TokenStream;
use quote::quote;

@@ -12,7 +10,7 @@ pub fn derive_uuid_from_param(input: TokenStream) -> TokenStream {

fn impl_derive_uuid_macro(ast: &syn::DeriveInput) -> TokenStream {
    let name = &ast.ident;
-    let gen = quote! {
+    let gen_derive = quote! {
        #[automatically_derived]
        impl<'r> rocket::request::FromParam<'r> for #name {
            type Error = ();
@@ -27,7 +25,7 @@ fn impl_derive_uuid_macro(ast: &syn::DeriveInput) -> TokenStream {
            }
        }
    };
-    gen.into()
+    gen_derive.into()
}

#[proc_macro_derive(IdFromParam)]
@@ -39,7 +37,7 @@ pub fn derive_id_from_param(input: TokenStream) -> TokenStream {

fn impl_derive_safestring_macro(ast: &syn::DeriveInput) -> TokenStream {
    let name = &ast.ident;
-    let gen = quote! {
+    let gen_derive = quote! {
        #[automatically_derived]
        impl<'r> rocket::request::FromParam<'r> for #name {
            type Error = ();
@@ -54,5 +52,5 @@ fn impl_derive_safestring_macro(ast: &syn::DeriveInput) -> TokenStream {
            }
        }
    };
-    gen.into()
+    gen_derive.into()
}
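The `gen` → `gen_derive` rename is the kind of change the toolchain bump below forces: `gen` is a reserved keyword in the Rust 2024 edition, so newer compilers warn on (or reject) it as a binding name. A tiny illustration of the two escape hatches, ours rather than the repo's:

```rust
fn main() {
    // Raw identifier syntax keeps the literal name `gen` usable...
    let r#gen = 1;
    // ...but a plain rename, as the diff above does, is usually cleaner.
    let gen_derive = r#gen + 1;
    println!("{gen_derive}");
}
```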
@@ -1,4 +1,4 @@
[toolchain]
-channel = "1.84.1"
+channel = "1.88.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
@@ -102,7 +102,7 @@ const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";
pub const FAKE_ADMIN_UUID: &str = "00000000-0000-0000-0000-000000000000";

fn admin_path() -> String {
-    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
+    format!("{}{ADMIN_PATH}", CONFIG.domain_path())
}

#[derive(Debug)]
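The `format!` changes through this file are mechanical: captured identifiers (stable since Rust 1.58) replace positional `{}` arguments wherever the argument is a plain identifier, while expressions such as `CONFIG.domain_path()` still need the positional form. A standalone sketch:

```rust
const ADMIN_PATH: &str = "/admin";

fn main() {
    let domain_path = String::from("/vw");
    // Positional form and captured form produce identical output;
    // both consts and locals can be captured by name.
    let old = format!("{}{}", domain_path, ADMIN_PATH);
    let new = format!("{domain_path}{ADMIN_PATH}");
    assert_eq!(old, new);
}
```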
@@ -206,7 +206,7 @@ fn post_admin_login(

    cookies.add(cookie);
    if let Some(redirect) = redirect {
-        Ok(Redirect::to(format!("{}{}", admin_path(), redirect)))
+        Ok(Redirect::to(format!("{}{redirect}", admin_path())))
    } else {
        Err(AdminResponse::Ok(render_admin_page()))
    }
@@ -421,13 +421,13 @@ async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> Em
async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let mut user = get_user_or_404(&user_id, &mut conn).await?;

-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;

    if CONFIG.push_enabled() {
        for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
-            match unregister_push_device(device.push_uuid).await {
+            match unregister_push_device(&device.push_uuid).await {
                Ok(r) => r,
-                Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
+                Err(e) => error!("Unable to unregister devices from Bitwarden server: {e}"),
            };
        }
    }
@@ -447,7 +447,7 @@ async fn disable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt:

    let save_result = user.save(&mut conn).await;

-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;

    save_result
}
@@ -591,20 +591,14 @@ struct GitCommit {
    sha: String,
}

-#[derive(Deserialize)]
-struct TimeApi {
-    year: u16,
-    month: u8,
-    day: u8,
-    hour: u8,
-    minute: u8,
-    seconds: u8,
-}
-
async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.json::<T>().await?)
}

+async fn get_text_api(url: &str) -> Result<String, Error> {
+    Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.text().await?)
+}
+
async fn has_http_access() -> bool {
    let Ok(req) = make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") else {
        return false;
@@ -616,10 +610,12 @@ async fn has_http_access() -> bool {
}

use cached::proc_macro::cached;
-/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
-/// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
-#[cached(time = 300, sync_writes = true)]
-async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
+/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already
+/// It will cache this function for 600 seconds (10 minutes) which should prevent the exhaustion of the rate limit
+/// Any cache will be lost if Vaultwarden is restarted
+use std::time::Duration; // Needed for cached
+#[cached(time = 600, sync_writes = "default")]
+async fn get_release_info(has_http_access: bool) -> (String, String, String) {
    // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
    if has_http_access {
        (
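The `#[cached]` change migrates to the string-valued `sync_writes` option of newer `cached` releases ("default" is the spelling of the old `sync_writes = true`) and doubles the TTL. A minimal sketch of the same memoization pattern, assuming the `cached` crate with async support and a tokio runtime (names and versions here are ours, not the repo's):

```rust
use std::time::Duration; // the macro's `time` option needs Duration in scope
use cached::proc_macro::cached;

// Memoized per argument for 600 seconds; concurrent writers to the same
// key are serialized, so the body runs once per key per TTL window.
#[cached(time = 600, sync_writes = "default")]
async fn expensive_lookup(key: String) -> String {
    format!("value for {key}")
}

#[tokio::main]
async fn main() {
    println!("{}", expensive_lookup("release".into()).await); // computed
    println!("{}", expensive_lookup("release".into()).await); // served from cache
}
```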
@@ -636,19 +632,13 @@ async fn get_release_info(has_http_access: bool, running_within_container: bool)
                }
                _ => "-".to_string(),
            },
-            // Do not fetch the web-vault version when running within a container.
+            // Do not fetch the web-vault version when running within a container
            // The web-vault version is embedded within the container it self, and should not be updated manually
-            if running_within_container {
-                "-".to_string()
-            } else {
-                match get_json_api::<GitRelease>(
-                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
-                )
+            match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest")
                .await
                {
-                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
-                    _ => "-".to_string(),
-                }
+                Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
+                _ => "-".to_string(),
+            },
            )
        } else {
@@ -658,17 +648,18 @@ async fn get_release_info(has_http_access: bool, running_within_container: bool)

async fn get_ntp_time(has_http_access: bool) -> String {
    if has_http_access {
-        if let Ok(ntp_time) = get_json_api::<TimeApi>("https://www.timeapi.io/api/Time/current/zone?timeZone=UTC").await
-        {
-            return format!(
-                "{year}-{month:02}-{day:02} {hour:02}:{minute:02}:{seconds:02} UTC",
-                year = ntp_time.year,
-                month = ntp_time.month,
-                day = ntp_time.day,
-                hour = ntp_time.hour,
-                minute = ntp_time.minute,
-                seconds = ntp_time.seconds
-            );
+        if let Ok(cf_trace) = get_text_api("https://cloudflare.com/cdn-cgi/trace").await {
+            for line in cf_trace.lines() {
+                if let Some((key, value)) = line.split_once('=') {
+                    if key == "ts" {
+                        let ts = value.split_once('.').map_or(value, |(s, _)| s);
+                        if let Ok(dt) = chrono::DateTime::parse_from_str(ts, "%s") {
+                            return dt.format("%Y-%m-%d %H:%M:%S UTC").to_string();
+                        }
+                        break;
+                    }
+                }
+            }
        }
    }
    String::from("Unable to fetch NTP time.")
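The replacement time source parses the `ts=` line of Cloudflare's `/cdn-cgi/trace` output, using chrono's `%s` specifier to interpret a Unix timestamp. The parsing core, extracted into a runnable sketch (the sample input is made up; assumes the `chrono` crate):

```rust
// Extract "ts=<epoch>.<frac>" from trace output and render it as UTC.
fn parse_trace_ts(trace: &str) -> Option<String> {
    for line in trace.lines() {
        if let Some((key, value)) = line.split_once('=') {
            if key == "ts" {
                // drop the fractional part: "1753280400.123" -> "1753280400"
                let ts = value.split_once('.').map_or(value, |(s, _)| s);
                if let Ok(dt) = chrono::DateTime::parse_from_str(ts, "%s") {
                    return Some(dt.format("%Y-%m-%d %H:%M:%S UTC").to_string());
                }
            }
        }
    }
    None
}

fn main() {
    let sample = "fl=1f2\nts=1753280400.123\ncolo=AMS";
    assert_eq!(parse_trace_ts(sample).as_deref(), Some("2025-07-23 14:20:00 UTC"));
}
```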
@@ -693,14 +684,23 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
        _ => "Unable to resolve domain name.".to_string(),
    };

-    let (latest_release, latest_commit, latest_web_build) =
-        get_release_info(has_http_access, running_within_container).await;
+    let (latest_release, latest_commit, latest_web_build) = get_release_info(has_http_access).await;

    let ip_header_name = &ip_header.0.unwrap_or_default();

    // Get current running versions
    let web_vault_version = get_web_vault_version();

+    // Check if the running version is newer than the latest stable released version
+    let web_vault_pre_release = if let Ok(web_ver_match) = semver::VersionReq::parse(&format!(">{latest_web_build}")) {
+        web_ver_match.matches(
+            &semver::Version::parse(&web_vault_version).unwrap_or_else(|_| semver::Version::parse("2025.1.1").unwrap()),
+        )
+    } else {
+        error!("Unable to parse latest_web_build: '{latest_web_build}'");
+        false
+    };
+
    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "current_release": VERSION,
@@ -709,6 +709,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
        "web_vault_enabled": &CONFIG.web_vault_enabled(),
        "web_vault_version": web_vault_version,
        "latest_web_build": latest_web_build,
+        "web_vault_pre_release": web_vault_pre_release,
        "running_within_container": running_within_container,
        "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
        "has_http_access": has_http_access,
@@ -724,6 +725,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
        "overrides": &CONFIG.get_overrides().join(", "),
        "host_arch": env::consts::ARCH,
        "host_os": env::consts::OS,
+        "tz_env": env::var("TZ").unwrap_or_default(),
        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
        "ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
@@ -745,17 +747,17 @@ fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
}

#[post("/config", format = "application/json", data = "<data>")]
-fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
+async fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
    let data: ConfigBuilder = data.into_inner();
-    if let Err(e) = CONFIG.update_config(data, true) {
+    if let Err(e) = CONFIG.update_config(data, true).await {
        err!(format!("Unable to save config: {e:?}"))
    }
    Ok(())
}

#[post("/config/delete", format = "application/json")]
-fn delete_config(_token: AdminToken) -> EmptyResult {
-    if let Err(e) = CONFIG.delete_user_config() {
+async fn delete_config(_token: AdminToken) -> EmptyResult {
+    if let Err(e) = CONFIG.delete_user_config().await {
        err!(format!("Unable to delete config: {e:?}"))
    }
    Ok(())
@@ -8,8 +8,8 @@ use serde_json::Value;
use crate::{
    api::{
        core::{log_user_event, two_factor::email},
-        register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult, Notify,
-        PasswordOrOtpData, UpdateType,
+        master_password_policy, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
+        Notify, PasswordOrOtpData, UpdateType,
    },
    auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
    crypto,
@@ -70,18 +70,31 @@ pub fn routes() -> Vec<rocket::Route> {
#[serde(rename_all = "camelCase")]
pub struct RegisterData {
    email: String,

    kdf: Option<i32>,
    kdf_iterations: Option<i32>,
    kdf_memory: Option<i32>,
    kdf_parallelism: Option<i32>,

    #[serde(alias = "userSymmetricKey")]
    key: String,
    #[serde(alias = "userAsymmetricKeys")]
    keys: Option<KeysData>,

    master_password_hash: String,
    master_password_hint: Option<String>,

    name: Option<String>,
-    token: Option<String>,

    #[allow(dead_code)]
    organization_user_id: Option<MembershipId>,

+    // Used only from the register/finish endpoint
+    email_verification_token: Option<String>,
+    accept_emergency_access_id: Option<EmergencyAccessId>,
+    accept_emergency_access_invite_token: Option<String>,
+    #[serde(alias = "token")]
+    org_invite_token: Option<String>,
}

#[derive(Debug, Deserialize)]
@@ -115,22 +128,86 @@ async fn is_email_2fa_required(member_id: Option<MembershipId>, conn: &mut DbCon
    if CONFIG.email_2fa_enforce_on_verified_invite() {
        return true;
    }
-    if member_id.is_some() {
-        return OrgPolicy::is_enabled_for_member(&member_id.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
-            .await;
+    if let Some(member_id) = member_id {
+        return OrgPolicy::is_enabled_for_member(&member_id, OrgPolicyType::TwoFactorAuthentication, conn).await;
    }
    false
}
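The `if let` refactor above is a small but common Rust cleanup: binding the inner value directly removes the `is_some()` check plus `unwrap()` pair, so there is no code path that can panic. A generic, self-contained illustration (names are ours, not Vaultwarden's):

```rust
// if-let binds the Option's payload once; no unwrap, no panic path.
fn describe(member_id: Option<&str>) -> String {
    if let Some(member_id) = member_id {
        format!("member {member_id}")
    } else {
        "no membership".to_string()
    }
}

fn main() {
    assert_eq!(describe(Some("abc")), "member abc");
    assert_eq!(describe(None), "no membership");
}
```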
#[post("/accounts/register", data = "<data>")]
|
||||
async fn register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
|
||||
_register(data, conn).await
|
||||
_register(data, false, conn).await
|
||||
}
|
||||
|
||||
pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult {
|
||||
let data: RegisterData = data.into_inner();
|
||||
pub async fn _register(data: Json<RegisterData>, email_verification: bool, mut conn: DbConn) -> JsonResult {
|
||||
let mut data: RegisterData = data.into_inner();
|
||||
let email = data.email.to_lowercase();
|
||||
|
||||
let mut email_verified = false;
|
||||
|
||||
let mut pending_emergency_access = None;
|
||||
|
||||
// First, validate the provided verification tokens
|
||||
if email_verification {
|
||||
match (
|
||||
&data.email_verification_token,
|
||||
&data.accept_emergency_access_id,
|
||||
&data.accept_emergency_access_invite_token,
|
||||
&data.organization_user_id,
|
||||
&data.org_invite_token,
|
||||
) {
|
||||
// Normal user registration, when email verification is required
|
||||
(Some(email_verification_token), None, None, None, None) => {
|
||||
let claims = crate::auth::decode_register_verify(email_verification_token)?;
|
||||
if claims.sub != data.email {
|
||||
err!("Email verification token does not match email");
|
||||
}
|
||||
|
||||
// During this call we don't get the name, so extract it from the claims
|
||||
if claims.name.is_some() {
|
||||
data.name = claims.name;
|
||||
}
|
||||
email_verified = claims.verified;
|
||||
}
|
||||
// Emergency access registration
|
||||
(None, Some(accept_emergency_access_id), Some(accept_emergency_access_invite_token), None, None) => {
|
||||
if !CONFIG.emergency_access_allowed() {
|
||||
err!("Emergency access is not enabled.")
|
||||
}
|
||||
|
||||
let claims = crate::auth::decode_emergency_access_invite(accept_emergency_access_invite_token)?;
|
||||
|
||||
if claims.email != data.email {
|
||||
err!("Claim email does not match email")
|
||||
}
|
||||
if &claims.emer_id != accept_emergency_access_id {
|
||||
err!("Claim emer_id does not match accept_emergency_access_id")
|
||||
}
|
||||
|
||||
pending_emergency_access = Some((accept_emergency_access_id, claims));
|
||||
email_verified = true;
|
||||
}
|
||||
// Org invite
|
||||
(None, None, None, Some(organization_user_id), Some(org_invite_token)) => {
|
||||
let claims = decode_invite(org_invite_token)?;
|
||||
|
||||
if claims.email != data.email {
|
||||
err!("Claim email does not match email")
|
||||
}
|
||||
|
||||
if &claims.member_id != organization_user_id {
|
||||
err!("Claim org_user_id does not match organization_user_id")
|
||||
}
|
||||
|
||||
email_verified = true;
|
||||
}
|
||||
|
||||
_ => {
|
||||
err!("Registration is missing required parameters")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
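The tuple match above is what keeps the registration flows mutually exclusive: exactly one combination of tokens may be present, and every other shape falls through to the error arm. The same shape in a self-contained toy:

```rust
// Matching on a tuple of Options rejects ambiguous input by construction.
fn classify(email_token: Option<&str>, org_token: Option<&str>) -> &'static str {
    match (email_token, org_token) {
        (Some(_), None) => "email-verification flow",
        (None, Some(_)) => "org-invite flow",
        _ => "missing or conflicting parameters",
    }
}

fn main() {
    assert_eq!(classify(Some("t"), None), "email-verification flow");
    assert_eq!(classify(Some("t"), Some("u")), "missing or conflicting parameters");
    assert_eq!(classify(None, None), "missing or conflicting parameters");
}
```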
    // Check if the length of the username exceeds 50 characters (Same is Upstream Bitwarden)
    // This also prevents issues with very long usernames causing to large JWT's. See #2419
    if let Some(ref name) = data.name {
@@ -144,20 +221,17 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
    let password_hint = clean_password_hint(&data.master_password_hint);
    enforce_password_hint_setting(&password_hint)?;

-    let mut verified_by_invite = false;
-
    let mut user = match User::find_by_mail(&email, &mut conn).await {
-        Some(mut user) => {
+        Some(user) => {
            if !user.password_hash.is_empty() {
                err!("Registration not allowed or user already exists")
            }

-            if let Some(token) = data.token {
+            if let Some(token) = data.org_invite_token {
                let claims = decode_invite(&token)?;
                if claims.email == email {
                    // Verify the email address when signing up via a valid invite token
-                    verified_by_invite = true;
-                    user.verified_at = Some(Utc::now().naive_utc());
+                    email_verified = true;
                    user
                } else {
                    err!("Registration email does not match invite email")
@@ -181,7 +255,10 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
            // Order is important here; the invitation check must come first
            // because the vaultwarden admin can invite anyone, regardless
            // of other signup restrictions.
-            if Invitation::take(&email, &mut conn).await || CONFIG.is_signup_allowed(&email) {
+            if Invitation::take(&email, &mut conn).await
+                || CONFIG.is_signup_allowed(&email)
+                || pending_emergency_access.is_some()
+            {
                User::new(email.clone())
            } else {
                err!("Registration not allowed or user already exists")
@@ -216,17 +293,21 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
        user.public_key = Some(keys.public_key);
    }

+    if email_verified {
+        user.verified_at = Some(Utc::now().naive_utc());
+    }
+
    if CONFIG.mail_enabled() {
-        if CONFIG.signups_verify() && !verified_by_invite {
+        if CONFIG.signups_verify() && !email_verified {
            if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
-                error!("Error sending welcome email: {:#?}", e);
+                error!("Error sending welcome email: {e:#?}");
            }
            user.last_verifying_at = Some(user.created_at);
        } else if let Err(e) = mail::send_welcome(&user.email).await {
-            error!("Error sending welcome email: {:#?}", e);
+            error!("Error sending welcome email: {e:#?}");
        }

-        if verified_by_invite && is_email_2fa_required(data.organization_user_id, &mut conn).await {
+        if email_verified && is_email_2fa_required(data.organization_user_id, &mut conn).await {
            email::activate_email_2fa(&user, &mut conn).await.ok();
        }
    }
@@ -255,7 +336,6 @@ async fn profile(headers: Headers, mut conn: DbConn) -> Json<Value> {
#[serde(rename_all = "camelCase")]
struct ProfileData {
    // culture: String, // Ignored, always use en-US
-    // masterPasswordHint: Option<String>, // Ignored, has been moved to ChangePassData
    name: String,
}
@@ -381,7 +461,7 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, mut conn: D
    // Prevent logging out the client where the user requested this endpoint from.
    // If you do logout the user it will causes issues at the client side.
    // Adding the device uuid will prevent this.
-    nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
+    nt.send_logout(&user, Some(headers.device.uuid.clone()), &mut conn).await;

    save_result
}
@@ -441,7 +521,7 @@ async fn post_kdf(data: Json<ChangeKdfData>, headers: Headers, mut conn: DbConn,
    user.set_password(&data.new_master_password_hash, Some(data.key), true, None);
    let save_result = user.save(&mut conn).await;

-    nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
+    nt.send_logout(&user, Some(headers.device.uuid.clone()), &mut conn).await;

    save_result
}
@@ -476,14 +556,45 @@ use super::sends::{update_send_from_data, SendData};
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct KeyData {
    account_unlock_data: RotateAccountUnlockData,
    account_keys: RotateAccountKeys,
    account_data: RotateAccountData,
    old_master_key_authentication_hash: String,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RotateAccountUnlockData {
    emergency_access_unlock_data: Vec<UpdateEmergencyAccessData>,
    master_password_unlock_data: MasterPasswordUnlockData,
    organization_account_recovery_unlock_data: Vec<UpdateResetPasswordData>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct MasterPasswordUnlockData {
    kdf_type: i32,
    kdf_iterations: i32,
    kdf_parallelism: Option<i32>,
    kdf_memory: Option<i32>,
    email: String,
    master_key_authentication_hash: String,
    master_key_encrypted_user_key: String,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RotateAccountKeys {
    user_key_encrypted_account_private_key: String,
    account_public_key: String,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RotateAccountData {
    ciphers: Vec<CipherData>,
    folders: Vec<UpdateFolderData>,
    sends: Vec<SendData>,
    emergency_access_keys: Vec<UpdateEmergencyAccessData>,
    reset_password_keys: Vec<UpdateResetPasswordData>,
    key: String,
    master_password_hash: String,
    private_key: String,
}
fn validate_keydata(
@@ -493,10 +604,24 @@ fn validate_keydata(
    existing_emergency_access: &[EmergencyAccess],
    existing_memberships: &[Membership],
    existing_sends: &[Send],
+    user: &User,
) -> EmptyResult {
+    if user.client_kdf_type != data.account_unlock_data.master_password_unlock_data.kdf_type
+        || user.client_kdf_iter != data.account_unlock_data.master_password_unlock_data.kdf_iterations
+        || user.client_kdf_memory != data.account_unlock_data.master_password_unlock_data.kdf_memory
+        || user.client_kdf_parallelism != data.account_unlock_data.master_password_unlock_data.kdf_parallelism
+        || user.email != data.account_unlock_data.master_password_unlock_data.email
+    {
+        err!("Changing the kdf variant or email is not supported during key rotation");
+    }
+    if user.public_key.as_ref() != Some(&data.account_keys.account_public_key) {
+        err!("Changing the asymmetric keypair is not possible during key rotation")
+    }

    // Check that we're correctly rotating all the user's ciphers
    let existing_cipher_ids = existing_ciphers.iter().map(|c| &c.uuid).collect::<HashSet<&CipherId>>();
    let provided_cipher_ids = data
        .account_data
        .ciphers
        .iter()
        .filter(|c| c.organization_id.is_none())
@@ -508,7 +633,8 @@ fn validate_keydata(

    // Check that we're correctly rotating all the user's folders
    let existing_folder_ids = existing_folders.iter().map(|f| &f.uuid).collect::<HashSet<&FolderId>>();
-    let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_ref()).collect::<HashSet<&FolderId>>();
+    let provided_folder_ids =
+        data.account_data.folders.iter().filter_map(|f| f.id.as_ref()).collect::<HashSet<&FolderId>>();
    if !provided_folder_ids.is_superset(&existing_folder_ids) {
        err!("All existing folders must be included in the rotation")
    }
@@ -516,8 +642,12 @@ fn validate_keydata(
    // Check that we're correctly rotating all the user's emergency access keys
    let existing_emergency_access_ids =
        existing_emergency_access.iter().map(|ea| &ea.uuid).collect::<HashSet<&EmergencyAccessId>>();
-    let provided_emergency_access_ids =
-        data.emergency_access_keys.iter().map(|ea| &ea.id).collect::<HashSet<&EmergencyAccessId>>();
+    let provided_emergency_access_ids = data
+        .account_unlock_data
+        .emergency_access_unlock_data
+        .iter()
+        .map(|ea| &ea.id)
+        .collect::<HashSet<&EmergencyAccessId>>();
    if !provided_emergency_access_ids.is_superset(&existing_emergency_access_ids) {
        err!("All existing emergency access keys must be included in the rotation")
    }
@@ -525,15 +655,19 @@ fn validate_keydata(
    // Check that we're correctly rotating all the user's reset password keys
    let existing_reset_password_ids =
        existing_memberships.iter().map(|m| &m.org_uuid).collect::<HashSet<&OrganizationId>>();
-    let provided_reset_password_ids =
-        data.reset_password_keys.iter().map(|rp| &rp.organization_id).collect::<HashSet<&OrganizationId>>();
+    let provided_reset_password_ids = data
+        .account_unlock_data
+        .organization_account_recovery_unlock_data
+        .iter()
+        .map(|rp| &rp.organization_id)
+        .collect::<HashSet<&OrganizationId>>();
    if !provided_reset_password_ids.is_superset(&existing_reset_password_ids) {
        err!("All existing reset password keys must be included in the rotation")
    }

    // Check that we're correctly rotating all the user's sends
    let existing_send_ids = existing_sends.iter().map(|s| &s.uuid).collect::<HashSet<&SendId>>();
-    let provided_send_ids = data.sends.iter().filter_map(|s| s.id.as_ref()).collect::<HashSet<&SendId>>();
+    let provided_send_ids = data.account_data.sends.iter().filter_map(|s| s.id.as_ref()).collect::<HashSet<&SendId>>();
    if !provided_send_ids.is_superset(&existing_send_ids) {
        err!("All existing sends must be included in the rotation")
    }
@@ -541,12 +675,12 @@ fn validate_keydata(
    Ok(())
}
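Each check above reduces to the same set-theoretic rule: the IDs supplied by the client must form a superset of the IDs already stored, so an incomplete rotation (which would leave some records encrypted under the retired key) is rejected, while brand-new records are still allowed through. In miniature:

```rust
use std::collections::HashSet;

fn main() {
    let existing: HashSet<&str> = HashSet::from(["cipher-1", "cipher-2"]);
    let provided: HashSet<&str> = HashSet::from(["cipher-1", "cipher-2", "cipher-3"]);
    // Extra (new) items are fine; missing existing items are not.
    assert!(provided.is_superset(&existing));
    assert!(!existing.is_superset(&provided));
}
```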
#[post("/accounts/key", data = "<data>")]
|
||||
#[post("/accounts/key-management/rotate-user-account-keys", data = "<data>")]
|
||||
async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
// TODO: See if we can wrap everything within a SQL Transaction. If something fails it should revert everything.
|
||||
let data: KeyData = data.into_inner();
|
||||
|
||||
if !headers.user.check_valid_password(&data.master_password_hash) {
|
||||
if !headers.user.check_valid_password(&data.old_master_key_authentication_hash) {
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
@@ -554,7 +688,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
// Bitwarden does not process the import if there is one item invalid.
|
||||
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
||||
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
||||
Cipher::validate_cipher_data(&data.ciphers)?;
|
||||
Cipher::validate_cipher_data(&data.account_data.ciphers)?;
|
||||
|
||||
let user_id = &headers.user.uuid;
|
||||
|
||||
@@ -575,10 +709,11 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
&existing_emergency_access,
|
||||
&existing_memberships,
|
||||
&existing_sends,
|
||||
&headers.user,
|
||||
)?;
|
||||
|
||||
// Update folder data
|
||||
for folder_data in data.folders {
|
||||
for folder_data in data.account_data.folders {
|
||||
// Skip `null` folder id entries.
|
||||
// See: https://github.com/bitwarden/clients/issues/8453
|
||||
if let Some(folder_id) = folder_data.id {
|
||||
@@ -592,7 +727,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
}
|
||||
|
||||
// Update emergency access data
|
||||
for emergency_access_data in data.emergency_access_keys {
|
||||
for emergency_access_data in data.account_unlock_data.emergency_access_unlock_data {
|
||||
let Some(saved_emergency_access) =
|
||||
existing_emergency_access.iter_mut().find(|ea| ea.uuid == emergency_access_data.id)
|
||||
else {
|
||||
@@ -604,7 +739,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
}
|
||||
|
||||
// Update reset password data
|
||||
for reset_password_data in data.reset_password_keys {
|
||||
for reset_password_data in data.account_unlock_data.organization_account_recovery_unlock_data {
|
||||
let Some(membership) =
|
||||
existing_memberships.iter_mut().find(|m| m.org_uuid == reset_password_data.organization_id)
|
||||
else {
|
||||
@@ -616,7 +751,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
}
|
||||
|
||||
// Update send data
|
||||
for send_data in data.sends {
|
||||
for send_data in data.account_data.sends {
|
||||
let Some(send) = existing_sends.iter_mut().find(|s| &s.uuid == send_data.id.as_ref().unwrap()) else {
|
||||
err!("Send doesn't exist")
|
||||
};
|
||||
@@ -627,7 +762,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
// Update cipher data
|
||||
use super::ciphers::update_cipher_from_data;
|
||||
|
||||
for cipher_data in data.ciphers {
|
||||
for cipher_data in data.account_data.ciphers {
|
||||
if cipher_data.organization_id.is_none() {
|
||||
let Some(saved_cipher) = existing_ciphers.iter_mut().find(|c| &c.uuid == cipher_data.id.as_ref().unwrap())
|
||||
else {
|
||||
@@ -644,16 +779,20 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
// Update user data
|
||||
let mut user = headers.user;
|
||||
|
||||
user.akey = data.key;
|
||||
user.private_key = Some(data.private_key);
|
||||
user.reset_security_stamp();
|
||||
user.private_key = Some(data.account_keys.user_key_encrypted_account_private_key);
|
||||
user.set_password(
|
||||
&data.account_unlock_data.master_password_unlock_data.master_key_authentication_hash,
|
||||
Some(data.account_unlock_data.master_password_unlock_data.master_key_encrypted_user_key),
|
||||
true,
|
||||
None,
|
||||
);
|
||||
|
||||
let save_result = user.save(&mut conn).await;
|
||||
|
||||
// Prevent logging out the client where the user requested this endpoint from.
|
||||
// If you do logout the user it will causes issues at the client side.
|
||||
// Adding the device uuid will prevent this.
|
||||
nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
|
||||
nt.send_logout(&user, Some(headers.device.uuid.clone()), &mut conn).await;
|
||||
|
||||
save_result
|
||||
}
|
||||
@@ -669,7 +808,7 @@ async fn post_sstamp(data: Json<PasswordOrOtpData>, headers: Headers, mut conn:
    user.reset_security_stamp();
    let save_result = user.save(&mut conn).await;

-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;

    save_result
}
@@ -695,6 +834,11 @@ async fn post_email_token(data: Json<EmailTokenData>, headers: Headers, mut conn
    }

    if User::find_by_mail(&data.new_email, &mut conn).await.is_some() {
+        if CONFIG.mail_enabled() {
+            if let Err(e) = mail::send_change_email_existing(&data.new_email, &user.email).await {
+                error!("Error sending change-email-existing email: {e:#?}");
+            }
+        }
        err!("Email already in use");
    }

@@ -706,10 +850,10 @@ async fn post_email_token(data: Json<EmailTokenData>, headers: Headers, mut conn

    if CONFIG.mail_enabled() {
        if let Err(e) = mail::send_change_email(&data.new_email, &token).await {
-            error!("Error sending change-email email: {:#?}", e);
+            error!("Error sending change-email email: {e:#?}");
        }
    } else {
-        debug!("Email change request for user ({}) to email ({}) with token ({})", user.uuid, data.new_email, token);
+        debug!("Email change request for user ({}) to email ({}) with token ({token})", user.uuid, data.new_email);
    }

    user.email_new = Some(data.new_email);
@@ -777,7 +921,7 @@ async fn post_email(data: Json<ChangeEmailData>, headers: Headers, mut conn: DbC

    let save_result = user.save(&mut conn).await;

-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;

    save_result
}
@@ -791,7 +935,7 @@ async fn post_verify_email(headers: Headers) -> EmptyResult {
    }

    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
-        error!("Error sending verify_email email: {:#?}", e);
+        error!("Error sending verify_email email: {e:#?}");
    }

    Ok(())
@@ -822,7 +966,7 @@ async fn post_verify_email_token(data: Json<VerifyEmailTokenData>, mut conn: DbC
    user.last_verifying_at = None;
    user.login_verify_count = 0;
    if let Err(e) = user.save(&mut conn).await {
-        error!("Error saving email verification: {:#?}", e);
+        error!("Error saving email verification: {e:#?}");
    }

    Ok(())
@@ -841,7 +985,7 @@ async fn post_delete_recover(data: Json<DeleteRecoverData>, mut conn: DbConn) ->
    if CONFIG.mail_enabled() {
        if let Some(user) = User::find_by_mail(&data.email, &mut conn).await {
            if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await {
-                error!("Error sending delete account email: {:#?}", e);
+                error!("Error sending delete account email: {e:#?}");
            }
        }
        Ok(())
@@ -975,7 +1119,7 @@ pub async fn _prelogin(data: Json<PreloginData>, mut conn: DbConn) -> Json<Value
    }))
}

-// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Auth/Models/Request/Accounts/SecretVerificationRequestModel.cs
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct SecretVerificationRequest {
@@ -983,7 +1127,7 @@ struct SecretVerificationRequest {
}

#[post("/accounts/verify-password", data = "<data>")]
-fn verify_password(data: Json<SecretVerificationRequest>, headers: Headers) -> EmptyResult {
+async fn verify_password(data: Json<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: SecretVerificationRequest = data.into_inner();
    let user = headers.user;

@@ -991,7 +1135,7 @@ fn verify_password(data: Json<SecretVerificationRequest>, headers: Headers) -> E
        err!("Invalid password")
    }

-    Ok(())
+    Ok(Json(master_password_policy(&user, &conn).await))
}

async fn _api_key(data: Json<PasswordOrOtpData>, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult {
@@ -1116,19 +1260,14 @@ async fn put_device_token(
        err!(format!("Error: device {device_id} should be present before a token can be assigned"))
    };

-    // if the device already has been registered
-    if device.is_registered() {
-        // check if the new token is the same as the registered token
-        if device.push_token.is_some() && device.push_token.unwrap() == token.clone() {
-            debug!("Device {} is already registered and token is the same", device_id);
-            return Ok(());
-        } else {
-            // Try to unregister already registered device
-            unregister_push_device(device.push_uuid).await.ok();
-        }
-        // clear the push_uuid
-        device.push_uuid = None;
+    // Check if the new token is the same as the registered token
+    // Although upstream seems to always register a device on login, we do not.
+    // Unless this causes issues, lets keep it this way, else we might need to also register on every login.
+    if device.push_token.as_ref() == Some(&token) {
+        debug!("Device {device_id} for user {} is already registered and token is identical", headers.user.uuid);
+        return Ok(());
    }

    device.push_token = Some(token);
    if let Err(e) = device.save(&mut conn).await {
        err!(format!("An error occurred while trying to save the device push token: {e}"));
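The rewritten token check compares `Option<&String>` values directly, which removes both the clone and the `unwrap()` panic path of the old code. The idiom in isolation:

```rust
fn main() {
    let stored: Option<String> = Some("tok-1".to_string());
    let incoming = "tok-1".to_string();
    // as_ref(): Option<String> -> Option<&String>; nothing is moved or cloned.
    if stored.as_ref() == Some(&incoming) {
        println!("token unchanged, nothing to do");
    }
}
```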
@@ -1142,16 +1281,19 @@ async fn put_device_token(
#[put("/devices/identifier/<device_id>/clear-token")]
async fn put_clear_device_token(device_id: DeviceId, mut conn: DbConn) -> EmptyResult {
    // This only clears push token
-    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
-    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
+    // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Controllers/DevicesController.cs#L215
+    // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Services/Implementations/DeviceService.cs#L37
    // This is somehow not implemented in any app, added it in case it is required
+    // 2025: Also, it looks like it only clears the first found device upstream, which is probably faulty.
+    // This because currently multiple accounts could be on the same device/app and that would cause issues.
+    // Vaultwarden removes the push-token for all devices, but this probably means we should also unregister all these devices.
    if !CONFIG.push_enabled() {
        return Ok(());
    }

    if let Some(device) = Device::find_by_uuid(&device_id, &mut conn).await {
        Device::clear_push_token_by_uuid(&device_id, &mut conn).await?;
-        unregister_push_device(device.push_uuid).await?;
+        unregister_push_device(&device.push_uuid).await?;
    }

    Ok(())
@@ -1189,10 +1331,10 @@ async fn post_auth_request(
    };

    // Validate device uuid and type
-    match Device::find_by_uuid_and_user(&data.device_identifier, &user.uuid, &mut conn).await {
-        Some(device) if device.atype == client_headers.device_type => {}
+    let device = match Device::find_by_uuid_and_user(&data.device_identifier, &user.uuid, &mut conn).await {
+        Some(device) if device.atype == client_headers.device_type => device,
        _ => err!("AuthRequest doesn't exist", "Device verification failed"),
-    }
+    };

    let mut auth_request = AuthRequest::new(
        user.uuid.clone(),
@@ -1204,7 +1346,7 @@ async fn post_auth_request(
    );
    auth_request.save(&mut conn).await?;

-    nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await;
+    nt.send_auth_request(&user.uuid, &auth_request.uuid, &device, &mut conn).await;

    log_user_event(
        EventType::UserRequestedDeviceApproval as i32,
@@ -1279,6 +1421,10 @@ async fn put_auth_request(
        err!("AuthRequest doesn't exist", "Record not found or user uuid does not match")
    };

+    if headers.device.uuid != data.device_identifier {
+        err!("AuthRequest doesn't exist", "Device verification failed")
+    }
+
    if auth_request.approved.is_some() {
        err!("An authentication request with the same device already exists")
    }
@@ -1295,7 +1441,7 @@ async fn put_auth_request(
    auth_request.save(&mut conn).await?;

    ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
-    nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await;
+    nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &headers.device, &mut conn).await;

    log_user_event(
        EventType::OrganizationUserApprovedAuthRequest as i32,
@@ -11,10 +11,11 @@ use rocket::{
use serde_json::Value;

use crate::auth::ClientVersion;
-use crate::util::NumberOrString;
+use crate::util::{save_temp_file, NumberOrString};
use crate::{
    api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
    auth::Headers,
+    config::PathType,
    crypto,
    db::{models::*, DbConn, DbPool},
    CONFIG,
@@ -105,12 +106,7 @@ struct SyncData {
}

#[get("/sync?<data..>")]
-async fn sync(
-    data: SyncData,
-    headers: Headers,
-    client_version: Option<ClientVersion>,
-    mut conn: DbConn,
-) -> Json<Value> {
+async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVersion>, mut conn: DbConn) -> JsonResult {
    let user_json = headers.user.to_json(&mut conn).await;

    // Get all ciphers which are visible by the user
@@ -134,7 +130,7 @@ async fn sync(
    for c in ciphers {
        ciphers_json.push(
            c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
-                .await,
+                .await?,
        );
    }

@@ -159,7 +155,7 @@ async fn sync(
        api::core::_get_eq_domains(headers, true).into_inner()
    };

-    Json(json!({
+    Ok(Json(json!({
        "profile": user_json,
        "folders": folders_json,
        "collections": collections_json,
@@ -168,11 +164,11 @@ async fn sync(
        "domains": domains_json,
        "sends": sends_json,
        "object": "sync"
-    }))
+    })))
}
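Changing the handler's return type from `Json<Value>` to `JsonResult` is what lets the fallible `to_json(...).await?` calls propagate errors instead of panicking. The mechanics, reduced to a standalone toy (`JsonResult` here is a stand-in alias, not the real one):

```rust
type JsonResult = Result<String, String>;

fn render(ok: bool) -> Result<String, String> {
    if ok { Ok("{}".into()) } else { Err("attachment lookup failed".into()) }
}

fn sync(ok: bool) -> JsonResult {
    let body = render(ok)?; // `?` propagates the error to the caller
    Ok(body)
}

fn main() {
    assert!(sync(true).is_ok());
    assert!(sync(false).is_err());
}
```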
#[get("/ciphers")]
|
||||
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
|
||||
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
|
||||
|
||||
@@ -180,15 +176,15 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||
for c in ciphers {
|
||||
ciphers_json.push(
|
||||
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
|
||||
.await,
|
||||
.await?,
|
||||
);
|
||||
}
|
||||
|
||||
Json(json!({
|
||||
Ok(Json(json!({
|
||||
"data": ciphers_json,
|
||||
"object": "list",
|
||||
"continuationToken": null
|
||||
}))
|
||||
})))
|
||||
}
|
||||
|
||||
#[get("/ciphers/<cipher_id>")]
|
||||
@@ -201,7 +197,7 @@ async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) ->
         err!("Cipher is not owned by user")
     }

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[get("/ciphers/<cipher_id>/admin")]

@@ -339,7 +335,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
     let mut cipher = Cipher::new(data.r#type, data.name.clone());
     update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 /// Enforces the personal ownership policy on user-owned ciphers, if applicable.

@@ -381,7 +377,7 @@ pub async fn update_cipher_from_data(
     if let Some(dt) = data.last_known_revision_date {
         match NaiveDateTime::parse_from_str(&dt, "%+") {
             // ISO 8601 format
-            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+            Err(err) => warn!("Error parsing LastKnownRevisionDate '{dt}': {err}"),
             Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
                 err!("The client copy of this cipher is out of date. Resync the client and try again.")
             }

@@ -535,7 +531,7 @@ pub async fn update_cipher_from_data(
             ut,
             cipher,
             &cipher.update_users_revision(conn).await,
-            &headers.device.uuid,
+            &headers.device,
             shared_to_collections,
             conn,
         )

@@ -612,7 +608,7 @@ async fn post_ciphers_import(

     let mut user = headers.user;
     user.update_revision(&mut conn).await?;
-    nt.send_user_update(UpdateType::SyncVault, &user).await;
+    nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await;

     Ok(())
 }

@@ -676,7 +672,7 @@ async fn put_cipher(

     update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[post("/ciphers/<cipher_id>/partial", data = "<data>")]

@@ -714,7 +710,7 @@ async fn put_cipher_partial(
     // Update favorite
     cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[derive(Deserialize)]

@@ -808,7 +804,7 @@ async fn post_collections_update(
         UpdateType::SyncCipherUpdate,
         &cipher,
         &cipher.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
+        &headers.device,
         Some(Vec::from_iter(posted_collections)),
         &mut conn,
     )

@@ -825,7 +821,7 @@ async fn post_collections_update(
     )
     .await;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[put("/ciphers/<cipher_id>/collections-admin", data = "<data>")]

@@ -885,7 +881,7 @@ async fn post_collections_admin(
         UpdateType::SyncCipherUpdate,
         &cipher,
         &cipher.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
+        &headers.device,
         Some(Vec::from_iter(posted_collections)),
         &mut conn,
     )

@@ -1030,7 +1026,7 @@ async fn share_cipher_by_uuid(

     update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
 }

 /// v2 API for downloading an attachment. This just redirects the client to

@@ -1055,7 +1051,7 @@ async fn get_attachment(
     }

     match Attachment::find_by_id(&attachment_id, &mut conn).await {
-        Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
+        Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)),
         Some(_) => err!("Attachment doesn't belong to cipher"),
         None => err!("Attachment doesn't exist"),
     }

@@ -1105,7 +1101,7 @@ async fn post_attachment_v2(
         Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.file_name, file_size, Some(data.key));
     attachment.save(&mut conn).await.expect("Error saving attachment");

-    let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
+    let url = format!("/ciphers/{}/attachment/{attachment_id}", cipher.uuid);
     let response_key = match data.admin_request {
         Some(b) if b => "cipherMiniResponse",
         _ => "cipherResponse",

@@ -1116,7 +1112,7 @@ async fn post_attachment_v2(
         "attachmentId": attachment_id,
         "url": url,
         "fileUploadType": FileUploadType::Direct as i32,
-        response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
+        response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?,
     })))
 }
@@ -1142,7 +1138,7 @@ async fn save_attachment(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> Result<(Cipher, DbConn), crate::error::Error> {
-    let mut data = data.into_inner();
+    let data = data.into_inner();

     let Some(size) = data.data.len().to_i64() else {
         err!("Attachment data size overflow");

@@ -1269,19 +1265,13 @@ async fn save_attachment(
         attachment.save(&mut conn).await.expect("Error saving attachment");
     }

-    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref());
-    let file_path = folder_path.join(file_id.as_ref());
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.data.persist_to(&file_path).await {
-        data.data.move_copy_to(file_path).await?
-    }
+    save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?;

     nt.send_cipher_update(
         UpdateType::SyncCipherUpdate,
         &cipher,
         &cipher.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
+        &headers.device,
         None,
         &mut conn,
     )
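The hunk above replaces hand-rolled filesystem code with a single `save_temp_file(PathType::Attachments, ...)` call, so one helper owns the "where do uploads go" decision. The sketch below only mirrors the observable shape of such a dispatching helper; the `PathType` variants, directory names, and internals are assumptions for illustration, not Vaultwarden's actual implementation (which can also target non-local backends):

```rust
use std::fs;
use std::io::Write;
use std::path::PathBuf;

// Assumed logical storage areas; the real enum lives in the config module.
enum PathType {
    Attachments,
    Sends,
}

// Resolve a logical area plus relative name to a concrete local path.
fn resolve(base: &PathType, rel: &str) -> PathBuf {
    let root = match base {
        PathType::Attachments => "data/attachments",
        PathType::Sends => "data/sends",
    };
    PathBuf::from(root).join(rel)
}

// One choke point for persisting uploads: callers no longer canonicalize
// folders or handle persist/move fallbacks themselves.
fn save_temp_file(base: PathType, rel: &str, bytes: &[u8]) -> std::io::Result<()> {
    let path = resolve(&base, rel);
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)?;
    }
    fs::File::create(path)?.write_all(bytes)
}

fn main() -> std::io::Result<()> {
    save_temp_file(PathType::Attachments, "cipher-1/file-1", b"demo")
}
```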
@@ -1342,7 +1332,7 @@ async fn post_attachment(

     let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[post("/ciphers/<cipher_id>/attachment-admin", format = "multipart/form-data", data = "<data>")]

@@ -1376,7 +1366,7 @@ async fn delete_attachment_post_admin(
     headers: Headers,
     conn: DbConn,
     nt: Notify<'_>,
-) -> EmptyResult {
+) -> JsonResult {
     delete_attachment(cipher_id, attachment_id, headers, conn, nt).await
 }

@@ -1387,7 +1377,7 @@ async fn delete_attachment_post(
     headers: Headers,
     conn: DbConn,
     nt: Notify<'_>,
-) -> EmptyResult {
+) -> JsonResult {
     delete_attachment(cipher_id, attachment_id, headers, conn, nt).await
 }

@@ -1398,7 +1388,7 @@ async fn delete_attachment(
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
-) -> EmptyResult {
+) -> JsonResult {
     _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await
 }

@@ -1409,7 +1399,7 @@ async fn delete_attachment_admin(
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
-) -> EmptyResult {
+) -> JsonResult {
     _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await
 }

@@ -1581,8 +1571,8 @@ async fn move_cipher_selected(
         nt.send_cipher_update(
             UpdateType::SyncCipherUpdate,
             &cipher,
-            &[user_id.clone()],
-            &headers.device.uuid,
+            std::slice::from_ref(&user_id),
+            &headers.device,
             None,
             &mut conn,
         )

@@ -1629,7 +1619,7 @@ async fn delete_all(
         Some(member) => {
             if member.atype == MembershipType::Owner {
                 Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?;
-                nt.send_user_update(UpdateType::SyncVault, &user).await;
+                nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await;

                 log_event(
                     EventType::OrganizationPurgedVault as i32,

@@ -1662,7 +1652,7 @@ async fn delete_all(
     }

     user.update_revision(&mut conn).await?;
-    nt.send_user_update(UpdateType::SyncVault, &user).await;
+    nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await;

     Ok(())
 }

@@ -1691,7 +1681,7 @@ async fn _delete_cipher_by_uuid(
             UpdateType::SyncCipherUpdate,
             &cipher,
             &cipher.update_users_revision(conn).await,
-            &headers.device.uuid,
+            &headers.device,
             None,
             conn,
         )

@@ -1702,7 +1692,7 @@ async fn _delete_cipher_by_uuid(
             UpdateType::SyncCipherDelete,
             &cipher,
             &cipher.update_users_revision(conn).await,
-            &headers.device.uuid,
+            &headers.device,
             None,
             conn,
         )

@@ -1767,7 +1757,7 @@ async fn _restore_cipher_by_uuid(
             UpdateType::SyncCipherUpdate,
             &cipher,
             &cipher.update_users_revision(conn).await,
-            &headers.device.uuid,
+            &headers.device,
             None,
             conn,
         )

@@ -1786,7 +1776,7 @@ async fn _restore_cipher_by_uuid(
         .await;
     }

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
 }

 async fn _restore_multiple_ciphers(

@@ -1818,7 +1808,7 @@ async fn _delete_cipher_attachment_by_id(
     headers: &Headers,
     conn: &mut DbConn,
     nt: &Notify<'_>,
-) -> EmptyResult {
+) -> JsonResult {
     let Some(attachment) = Attachment::find_by_id(attachment_id, conn).await else {
         err!("Attachment doesn't exist")
     };

@@ -1841,17 +1831,17 @@ async fn _delete_cipher_attachment_by_id(
         UpdateType::SyncCipherUpdate,
         &cipher,
         &cipher.update_users_revision(conn).await,
-        &headers.device.uuid,
+        &headers.device,
         None,
         conn,
     )
     .await;

-    if let Some(org_id) = cipher.organization_uuid {
+    if let Some(ref org_id) = cipher.organization_uuid {
         log_event(
             EventType::CipherAttachmentDeleted as i32,
             &cipher.uuid,
-            &org_id,
+            org_id,
             &headers.user.uuid,
             headers.device.atype,
             &headers.ip.ip,

@@ -1859,7 +1849,8 @@ async fn _delete_cipher_attachment_by_id(
         )
         .await;
     }
-    Ok(())
+    let cipher_json = cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?;
+    Ok(Json(json!({"cipher":cipher_json})))
 }

 /// This will hold all the necessary data to improve a full sync of all the ciphers

@@ -1933,11 +1924,21 @@ impl CipherSyncData {

         // Generate a HashMap with the collections_uuid as key and the CollectionGroup record
         let user_collections_groups: HashMap<CollectionId, CollectionGroup> = if CONFIG.org_groups_enabled() {
-            CollectionGroup::find_by_user(user_id, conn)
-                .await
-                .into_iter()
-                .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
-                .collect()
+            CollectionGroup::find_by_user(user_id, conn).await.into_iter().fold(
+                HashMap::new(),
+                |mut combined_permissions, cg| {
+                    combined_permissions
+                        .entry(cg.collections_uuid.clone())
+                        .and_modify(|existing| {
+                            // Combine permissions: take the most permissive settings.
+                            existing.read_only &= cg.read_only; // false if ANY group allows write
+                            existing.hide_passwords &= cg.hide_passwords; // false if ANY group allows password view
+                            existing.manage |= cg.manage; // true if ANY group allows manage
+                        })
+                        .or_insert(cg);
+                    combined_permissions
+                },
+            )
         } else {
            HashMap::new()
        };
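The fold above replaces a plain `collect()` that silently kept only one `CollectionGroup` per collection; when a user reaches the same collection through several groups, the most permissive combination must win. A self-contained sketch of that merge rule, with `CollectionGroup` reduced to its three booleans:

```rust
use std::collections::HashMap;

#[derive(Debug)]
struct Perms {
    read_only: bool,
    hide_passwords: bool,
    manage: bool,
}

fn combine(memberships: Vec<(&'static str, Perms)>) -> HashMap<&'static str, Perms> {
    memberships.into_iter().fold(HashMap::new(), |mut acc, (col, p)| {
        acc.entry(col)
            .and_modify(|e| {
                // Restrictions compose with AND, grants with OR, so any single
                // permissive group wins.
                e.read_only &= p.read_only;
                e.hide_passwords &= p.hide_passwords;
                e.manage |= p.manage;
            })
            .or_insert(p);
        acc
    })
}

fn main() {
    let merged = combine(vec![
        ("col-1", Perms { read_only: true, hide_passwords: true, manage: false }),
        ("col-1", Perms { read_only: false, hide_passwords: true, manage: true }),
    ]);
    // col-1 ends up: read_only: false, hide_passwords: true, manage: true
    println!("{merged:?}");
}
```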
@@ -227,7 +227,7 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
     let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
         None => {
             if !CONFIG.invitations_allowed() {
-                err!(format!("Grantee user does not exist: {}", &email))
+                err!(format!("Grantee user does not exist: {email}"))
             }

             if !CONFIG.is_email_domain_allowed(&email) {

@@ -582,7 +582,7 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut
                 CipherSyncType::User,
                 &mut conn,
             )
-            .await,
+            .await?,
         );
     }
@@ -29,7 +29,7 @@ struct EventRange {
     continuation_token: Option<String>,
 }

-// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
+// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Controllers/EventsController.cs#L87
 #[get("/organizations/<org_id>/events?<data..>")]
 async fn get_org_events(
     org_id: OrganizationId,

@@ -169,8 +169,8 @@ struct EventCollection {
 }

 // Upstream:
-// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
-// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Events/Controllers/CollectController.cs
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs
 #[post("/collect", format = "application/json", data = "<data>")]
 async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.org_events_enabled() {
@@ -45,7 +45,7 @@ async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn
     let mut folder = Folder::new(headers.user.uuid, data.name);

     folder.save(&mut conn).await?;
-    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
+    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

@@ -78,7 +78,7 @@ async fn put_folder(
     folder.name = data.name;

     folder.save(&mut conn).await?;
-    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
+    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

@@ -97,6 +97,6 @@ async fn delete_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn,
     // Delete the actual folder entry
     folder.delete(&mut conn).await?;

-    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device.uuid, &mut conn).await;
+    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device, &mut conn).await;
     Ok(())
 }
@@ -124,7 +124,7 @@ async fn post_eq_domains(

     user.save(&mut conn).await?;

-    nt.send_user_update(UpdateType::SyncSettings, &user).await;
+    nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &mut conn).await;

     Ok(Json(json!({})))
 }

@@ -199,11 +199,18 @@ fn get_api_webauthn(_headers: Headers) -> Json<Value> {
 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
+    // Official available feature flags can be found here:
+    // Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
+    // Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
+    // Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
+    // iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
     let mut feature_states =
         parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
     // Force the new key rotation feature
     feature_states.insert("key-rotation-improvements".to_string(), true);
     feature_states.insert("flexible-collections-v-1".to_string(), false);
     feature_states.insert("duo-redirect".to_string(), true);
     feature_states.insert("email-verification".to_string(), true);
     feature_states.insert("unauth-ui-refresh".to_string(), true);
+    feature_states.insert("enable-pm-flight-recorder".to_string(), true);
+    feature_states.insert("mobile-error-reporting".to_string(), true);

     Json(json!({
         // Note: The clients use this version to handle backwards compatibility concerns

@@ -211,14 +218,14 @@ fn config() -> Json<Value> {
         // We should make sure that we keep this updated when we support the new server features
         // Version history:
         // - Individual cipher key encryption: 2024.2.0
-        "version": "2025.1.0",
+        "version": "2025.6.0",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
             "url": "https://github.com/dani-garcia/vaultwarden"
         },
         "settings": {
-            "disableUserRegistration": !crate::CONFIG.signups_allowed() && crate::CONFIG.signups_domains_whitelist().is_empty(),
+            "disableUserRegistration": crate::CONFIG.is_signup_disabled()
         },
         "environment": {
             "vault": domain,

@@ -226,6 +233,12 @@ fn config() -> Json<Value> {
             "identity": format!("{domain}/identity"),
             "notifications": format!("{domain}/notifications"),
             "sso": "",
+            "cloudRegion": null,
         },
+        // Bitwarden uses this for the self-hosted servers to indicate the default push technology
+        "push": {
+            "pushTechnology": 0,
+            "vapidPublicKey": null
+        },
         "featureStates": feature_states,
         "object": "config",
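The `config()` hunks above layer server-side overrides on top of the operator's `EXPERIMENTAL_CLIENT_FEATURE_FLAGS` setting. A small sketch of that parse-then-override pattern; `parse_flags` here only mirrors the observable behaviour of turning a comma-separated setting into per-flag booleans, not Vaultwarden's actual parser:

```rust
use std::collections::HashMap;

// Assumed behaviour: each listed flag name becomes an enabled flag.
fn parse_flags(setting: &str) -> HashMap<String, bool> {
    setting
        .split(',')
        .map(str::trim)
        .filter(|f| !f.is_empty())
        .map(|f| (f.to_string(), true))
        .collect()
}

fn main() {
    let mut feature_states = parse_flags("autofill-v2, ssh-agent");
    // Server-side overrides win over whatever the operator configured,
    // exactly like the forced inserts in config() above.
    feature_states.insert("key-rotation-improvements".to_string(), true);
    feature_states.insert("flexible-collections-v-1".to_string(), false);
    println!("{feature_states:?}");
}
```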
@@ -10,10 +10,7 @@ use crate::{
         core::{log_event, two_factor, CipherSyncData, CipherSyncType},
         EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType,
     },
-    auth::{
-        decode_invite, AdminHeaders, ClientVersion, Headers, ManagerHeaders, ManagerHeadersLoose, OrgMemberHeaders,
-        OwnerHeaders,
-    },
+    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OrgMemberHeaders, OwnerHeaders},
     db::{models::*, DbConn},
     mail,
     util::{convert_json_key_lcase_first, NumberOrString},

@@ -38,6 +35,7 @@ pub fn routes() -> Vec<Route> {
         post_organization_collections,
         delete_organization_collection_member,
         post_organization_collection_delete_member,
+        post_bulk_access_collections,
         post_organization_collection_update,
         put_organization_collection_update,
         delete_organization_collection,

@@ -129,17 +127,17 @@ struct OrganizationUpdateData {

 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
-struct NewCollectionData {
+struct FullCollectionData {
     name: String,
-    groups: Vec<NewCollectionGroupData>,
-    users: Vec<NewCollectionMemberData>,
+    groups: Vec<CollectionGroupData>,
+    users: Vec<CollectionMembershipData>,
     id: Option<CollectionId>,
     external_id: Option<String>,
 }

 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
-struct NewCollectionGroupData {
+struct CollectionGroupData {
     hide_passwords: bool,
     id: GroupId,
     read_only: bool,

@@ -148,7 +146,7 @@ struct NewCollectionGroupData {

 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
-struct NewCollectionMemberData {
+struct CollectionMembershipData {
     hide_passwords: bool,
     id: MembershipId,
     read_only: bool,

@@ -376,6 +374,21 @@ async fn get_org_collections_details(
         || (CONFIG.org_groups_enabled()
             && GroupUser::has_full_access_by_member(&org_id, &member.uuid, &mut conn).await);

+    // Get all admins, owners and managers who can manage/access all
+    // Those are currently not listed in the col_users but need to be listed too.
+    let manage_all_members: Vec<Value> = Membership::find_confirmed_and_manage_all_by_org(&org_id, &mut conn)
+        .await
+        .into_iter()
+        .map(|member| {
+            json!({
+                "id": member.uuid,
+                "readOnly": false,
+                "hidePasswords": false,
+                "manage": true,
+            })
+        })
+        .collect();
+
     for col in Collection::find_by_organization(&org_id, &mut conn).await {
         // check whether the current user has access to the given collection
         let assigned = has_full_access_to_org

@@ -384,7 +397,7 @@ async fn get_org_collections_details(
             && GroupUser::has_access_to_collection_by_member(&col.uuid, &member.uuid, &mut conn).await);

         // get the users assigned directly to the given collection
-        let users: Vec<Value> = col_users
+        let mut users: Vec<Value> = col_users
             .iter()
             .filter(|collection_member| collection_member.collection_uuid == col.uuid)
             .map(|collection_member| {

@@ -393,6 +406,7 @@ async fn get_org_collections_details(
                 )
             })
             .collect();
+        users.extend_from_slice(&manage_all_members);

         // get the group details for the given collection
         let groups: Vec<Value> = if CONFIG.org_groups_enabled() {

@@ -429,13 +443,13 @@ async fn _get_org_collections(org_id: &OrganizationId, conn: &mut DbConn) -> Val
 async fn post_organization_collections(
     org_id: OrganizationId,
     headers: ManagerHeadersLoose,
-    data: Json<NewCollectionData>,
+    data: Json<FullCollectionData>,
     mut conn: DbConn,
 ) -> JsonResult {
     if org_id != headers.membership.org_uuid {
         err!("Organization not found", "Organization id's do not match");
     }
-    let data: NewCollectionData = data.into_inner();
+    let data: FullCollectionData = data.into_inner();

     let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else {
         err!("Can't find organization details")

@@ -488,29 +502,104 @@ async fn post_organization_collections(
     Ok(Json(collection.to_json_details(&headers.membership.user_uuid, None, &mut conn).await))
 }

+#[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct BulkCollectionAccessData {
+    collection_ids: Vec<CollectionId>,
+    groups: Vec<CollectionGroupData>,
+    users: Vec<CollectionMembershipData>,
+}
+
+#[post("/organizations/<org_id>/collections/bulk-access", data = "<data>", rank = 1)]
+async fn post_bulk_access_collections(
+    org_id: OrganizationId,
+    headers: ManagerHeadersLoose,
+    data: Json<BulkCollectionAccessData>,
+    mut conn: DbConn,
+) -> EmptyResult {
+    if org_id != headers.membership.org_uuid {
+        err!("Organization not found", "Organization id's do not match");
+    }
+    let data: BulkCollectionAccessData = data.into_inner();
+
+    if Organization::find_by_uuid(&org_id, &mut conn).await.is_none() {
+        err!("Can't find organization details")
+    };
+
+    for col_id in data.collection_ids {
+        let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else {
+            err!("Collection not found")
+        };
+
+        // update collection modification date
+        collection.save(&mut conn).await?;
+
+        log_event(
+            EventType::CollectionUpdated as i32,
+            &collection.uuid,
+            &org_id,
+            &headers.user.uuid,
+            headers.device.atype,
+            &headers.ip.ip,
+            &mut conn,
+        )
+        .await;
+
+        CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?;
+        for group in &data.groups {
+            CollectionGroup::new(col_id.clone(), group.id.clone(), group.read_only, group.hide_passwords, group.manage)
+                .save(&mut conn)
+                .await?;
+        }
+
+        CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?;
+        for user in &data.users {
+            let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else {
+                err!("User is not part of organization")
+            };
+
+            if member.access_all {
+                continue;
+            }
+
+            CollectionUser::save(
+                &member.user_uuid,
+                &col_id,
+                user.read_only,
+                user.hide_passwords,
+                user.manage,
+                &mut conn,
+            )
+            .await?;
+        }
+    }
+
+    Ok(())
+}
+
 #[put("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
 async fn put_organization_collection_update(
     org_id: OrganizationId,
     col_id: CollectionId,
     headers: ManagerHeaders,
-    data: Json<NewCollectionData>,
+    data: Json<FullCollectionData>,
     conn: DbConn,
 ) -> JsonResult {
     post_organization_collection_update(org_id, col_id, headers, data, conn).await
 }

-#[post("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
+#[post("/organizations/<org_id>/collections/<col_id>", data = "<data>", rank = 2)]
 async fn post_organization_collection_update(
     org_id: OrganizationId,
     col_id: CollectionId,
     headers: ManagerHeaders,
-    data: Json<NewCollectionData>,
+    data: Json<FullCollectionData>,
     mut conn: DbConn,
 ) -> JsonResult {
     if org_id != headers.org_id {
         err!("Organization not found", "Organization id's do not match");
     }
-    let data: NewCollectionData = data.into_inner();
+    let data: FullCollectionData = data.into_inner();

     if Organization::find_by_uuid(&org_id, &mut conn).await.is_none() {
         err!("Can't find organization details")

@@ -608,6 +697,9 @@ async fn _delete_organization_collection(
     headers: &ManagerHeaders,
     conn: &mut DbConn,
 ) -> EmptyResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     let Some(collection) = Collection::find_by_uuid_and_org(col_id, org_id, conn).await else {
         err!("Collection not found", "Collection does not exist or does not belong to this organization")
     };

@@ -634,15 +726,6 @@ async fn delete_organization_collection(
     _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await
 }

-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-struct DeleteCollectionData {
-    #[allow(dead_code)]
-    id: String,
-    #[allow(dead_code)]
-    org_id: OrganizationId,
-}
-
 #[post("/organizations/<org_id>/collections/<col_id>/delete")]
 async fn post_organization_collection_delete(
     org_id: OrganizationId,

@@ -781,7 +864,7 @@ async fn get_collection_users(
 async fn put_collection_users(
     org_id: OrganizationId,
     col_id: CollectionId,
-    data: Json<Vec<MembershipData>>,
+    data: Json<Vec<CollectionMembershipData>>,
     headers: ManagerHeaders,
     mut conn: DbConn,
 ) -> EmptyResult {

@@ -820,26 +903,31 @@ struct OrgIdData {

 #[get("/ciphers/organization-details?<data..>")]
 async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: DbConn) -> JsonResult {
-    if data.organization_id != headers.org_id {
+    if data.organization_id != headers.membership.org_uuid {
         err_code!("Resource not found.", "Organization id's do not match", rocket::http::Status::NotFound.code);
     }

     Ok(Json(json!({
-        "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
+        "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await?,
         "object": "list",
         "continuationToken": null,
     })))
 }

-async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Value {
+async fn _get_org_details(
+    org_id: &OrganizationId,
+    host: &str,
+    user_id: &UserId,
+    conn: &mut DbConn,
+) -> Result<Value, crate::Error> {
     let ciphers = Cipher::find_by_org(org_id, conn).await;
     let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await;

     let mut ciphers_json = Vec::with_capacity(ciphers.len());
     for c in ciphers {
-        ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await);
+        ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await?);
     }
-    json!(ciphers_json)
+    Ok(json!(ciphers_json))
 }

 #[derive(FromForm)]

@@ -913,24 +1001,6 @@ async fn post_org_keys(
     })))
 }

-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct CollectionData {
-    id: CollectionId,
-    read_only: bool,
-    hide_passwords: bool,
-    manage: bool,
-}
-
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct MembershipData {
-    id: MembershipId,
-    read_only: bool,
-    hide_passwords: bool,
-    manage: bool,
-}
-
 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct InviteData {

@@ -939,8 +1009,6 @@ struct InviteData {
     r#type: NumberOrString,
     collections: Option<Vec<CollectionData>>,
-    #[serde(default)]
-    access_all: bool,
     #[serde(default)]
     permissions: HashMap<String, Value>,
 }

@@ -954,7 +1022,7 @@ async fn send_invite(
     if org_id != headers.org_id {
         err!("Organization not found", "Organization id's do not match");
     }
-    let mut data: InviteData = data.into_inner();
+    let data: InviteData = data.into_inner();

     // HACK: We need the raw user-type to be sure custom role is selected to determine the access_all permission
     // The from_str() will convert the custom role type into a manager role type

@@ -972,13 +1040,11 @@ async fn send_invite(
     // HACK: This converts the Custom role which has the `Manage all collections` box checked into an access_all flag
     // Since the parent checkbox is not sent to the server we need to check and verify the child checkboxes
     // If the box is not checked, the user will still be a manager, but not with the access_all permission
-    if raw_type.eq("4")
-        && data.permissions.get("editAnyCollection") == Some(&json!(true))
-        && data.permissions.get("deleteAnyCollection") == Some(&json!(true))
-        && data.permissions.get("createNewCollections") == Some(&json!(true))
-    {
-        data.access_all = true;
-    }
+    let access_all = new_type >= MembershipType::Admin
+        || (raw_type.eq("4")
+            && data.permissions.get("editAnyCollection") == Some(&json!(true))
+            && data.permissions.get("deleteAnyCollection") == Some(&json!(true))
+            && data.permissions.get("createNewCollections") == Some(&json!(true)));

     let mut user_created: bool = false;
     for email in data.emails.iter() {
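The hunk above stops trusting a client-supplied `access_all` field and instead derives the flag from the role. A standalone sketch of that derivation; `MembershipType` and the permissions map are simplified stand-ins for the real models:

```rust
use std::collections::HashMap;

#[derive(PartialEq, PartialOrd)]
enum MembershipType {
    User = 2,
    Manager = 3,
    Admin = 4,
    Owner = 5,
}

fn derive_access_all(new_type: &MembershipType, raw_type: &str, perms: &HashMap<&str, bool>) -> bool {
    // Admins and Owners always see everything; a Custom role (raw "4") only
    // does when all three collection-management child checkboxes are set.
    *new_type >= MembershipType::Admin
        || (raw_type == "4"
            && perms.get("editAnyCollection") == Some(&true)
            && perms.get("deleteAnyCollection") == Some(&true)
            && perms.get("createNewCollections") == Some(&true))
}

fn main() {
    let perms = HashMap::from([
        ("editAnyCollection", true),
        ("deleteAnyCollection", true),
        ("createNewCollections", true),
    ]);
    // A Custom role parses to Manager but still earns access_all here.
    assert!(derive_access_all(&MembershipType::Manager, "4", &perms));
    assert!(!derive_access_all(&MembershipType::User, "2", &perms));
    println!("ok");
}
```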
@@ -1016,7 +1082,6 @@ async fn send_invite(
         };

         let mut new_member = Membership::new(user.uuid.clone(), org_id.clone());
-        let access_all = data.access_all;
         new_member.access_all = access_all;
         new_member.atype = new_type;
         new_member.status = member_status;

@@ -1130,6 +1195,9 @@ async fn reinvite_member(
     headers: AdminHeaders,
     mut conn: DbConn,
 ) -> EmptyResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     _reinvite_member(&org_id, &member_id, &headers.user.email, &mut conn).await
 }

@@ -1347,6 +1415,9 @@ async fn _confirm_invite(
     conn: &mut DbConn,
     nt: &Notify<'_>,
 ) -> EmptyResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     if key.is_empty() || member_id.is_empty() {
         err!("Key or UserId is not set, unable to process request");
     }

@@ -1410,7 +1481,7 @@ async fn _confirm_invite(
     let save_result = member_to_confirm.save(conn).await;

     if let Some(user) = User::find_by_uuid(&member_to_confirm.user_uuid, conn).await {
-        nt.send_user_update(UpdateType::SyncOrgKeys, &user).await;
+        nt.send_user_update(UpdateType::SyncOrgKeys, &user, &headers.device.push_uuid, conn).await;
     }

     save_result

@@ -1467,8 +1538,6 @@ struct EditUserData {
     collections: Option<Vec<CollectionData>>,
     groups: Option<Vec<GroupId>>,
-    #[serde(default)]
-    access_all: bool,
     #[serde(default)]
     permissions: HashMap<String, Value>,
 }

@@ -1494,7 +1563,7 @@ async fn edit_member(
     if org_id != headers.org_id {
         err!("Organization not found", "Organization id's do not match");
     }
-    let mut data: EditUserData = data.into_inner();
+    let data: EditUserData = data.into_inner();

     // HACK: We need the raw user-type to be sure custom role is selected to determine the access_all permission
     // The from_str() will convert the custom role type into a manager role type

@@ -1507,13 +1576,11 @@ async fn edit_member(
     // HACK: This converts the Custom role which has the `Manage all collections` box checked into an access_all flag
     // Since the parent checkbox is not sent to the server we need to check and verify the child checkboxes
     // If the box is not checked, the user will still be a manager, but not with the access_all permission
-    if raw_type.eq("4")
-        && data.permissions.get("editAnyCollection") == Some(&json!(true))
-        && data.permissions.get("deleteAnyCollection") == Some(&json!(true))
-        && data.permissions.get("createNewCollections") == Some(&json!(true))
-    {
-        data.access_all = true;
-    }
+    let access_all = new_type >= MembershipType::Admin
+        || (raw_type.eq("4")
+            && data.permissions.get("editAnyCollection") == Some(&json!(true))
+            && data.permissions.get("deleteAnyCollection") == Some(&json!(true))
+            && data.permissions.get("createNewCollections") == Some(&json!(true)));

     let mut member_to_edit = match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await {
         Some(member) => member,

@@ -1559,7 +1626,7 @@ async fn edit_member(
         }
     }

-    member_to_edit.access_all = data.access_all;
+    member_to_edit.access_all = access_all;
     member_to_edit.atype = new_type as i32;

     // Delete all the odd collections

@@ -1568,7 +1635,7 @@ async fn edit_member(
     }

     // If no accessAll, add the collections received
-    if !data.access_all {
+    if !access_all {
         for col in data.collections.iter().flatten() {
             match Collection::find_by_uuid_and_org(&col.id, &org_id, &mut conn).await {
                 None => err!("Collection not found in Organization"),

@@ -1673,6 +1740,9 @@ async fn _delete_member(
     conn: &mut DbConn,
     nt: &Notify<'_>,
 ) -> EmptyResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     let Some(member_to_delete) = Membership::find_by_uuid_and_org(member_id, org_id, conn).await else {
         err!("User to delete isn't member of the organization")
     };

@@ -1701,7 +1771,7 @@ async fn _delete_member(
     .await;

     if let Some(user) = User::find_by_uuid(&member_to_delete.user_uuid, conn).await {
-        nt.send_user_update(UpdateType::SyncOrgKeys, &user).await;
+        nt.send_user_update(UpdateType::SyncOrgKeys, &user, &headers.device.push_uuid, conn).await;
     }

     member_to_delete.delete(conn).await

@@ -1754,7 +1824,7 @@ use super::ciphers::CipherData;
 #[serde(rename_all = "camelCase")]
 struct ImportData {
     ciphers: Vec<CipherData>,
-    collections: Vec<NewCollectionData>,
+    collections: Vec<FullCollectionData>,
     collection_relationships: Vec<RelationsData>,
 }

@@ -1767,16 +1837,20 @@ struct RelationsData {
     value: usize,
 }

+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/ImportCiphersController.cs#L62
 #[post("/ciphers/import-organization?<query..>", data = "<data>")]
 async fn post_org_import(
     query: OrgIdData,
     data: Json<ImportData>,
-    headers: AdminHeaders,
+    headers: OrgMemberHeaders,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
-    let data: ImportData = data.into_inner();
     let org_id = query.organization_id;
+    if org_id != headers.membership.org_uuid {
+        err!("Organization not found", "Organization id's do not match");
+    }
+    let data: ImportData = data.into_inner();

     // Validate the import before continuing
     // Bitwarden does not process the import if there is one item invalid.

@@ -1789,8 +1863,20 @@ async fn post_org_import(
     let mut collections: Vec<CollectionId> = Vec::with_capacity(data.collections.len());
     for col in data.collections {
         let collection_uuid = if existing_collections.contains(&col.id) {
-            col.id.unwrap()
+            let col_id = col.id.unwrap();
+            // When not an Owner or Admin, check if the member is allowed to access the collection.
+            if headers.membership.atype < MembershipType::Admin
+                && !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await
+            {
+                err!(Compact, "The current user isn't allowed to manage this collection")
+            }
+            col_id
         } else {
+            // We do not allow users or managers which can not manage all collections to create new collections
+            // If there is any collection other than an existing import collection, abort the import.
+            if headers.membership.atype <= MembershipType::Manager && !headers.membership.has_full_access() {
+                err!(Compact, "The current user isn't allowed to create new collections")
+            }
             let new_collection = Collection::new(org_id.clone(), col.name, col.external_id);
             new_collection.save(&mut conn).await?;
             new_collection.uuid

@@ -1813,7 +1899,17 @@ async fn post_org_import(
         // Always clear folder_id's via an organization import
         cipher_data.folder_id = None;
         let mut cipher = Cipher::new(cipher_data.r#type, cipher_data.name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await.ok();
+        update_cipher_from_data(
+            &mut cipher,
+            cipher_data,
+            &headers,
+            Some(collections.clone()),
+            &mut conn,
+            &nt,
+            UpdateType::None,
+        )
+        .await
+        .ok();
         ciphers.push(cipher.uuid);
     }

@@ -1844,12 +1940,6 @@ struct BulkCollectionsData {
 async fn post_bulk_collections(data: Json<BulkCollectionsData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     let data: BulkCollectionsData = data.into_inner();

-    // This feature does not seem to be active on all the clients
-    // To prevent future issues, add a check to block a call when this is set to true
-    if data.remove_collections {
-        err!("Bulk removing of collections is not yet implemented")
-    }
-
     // Get all the collection available to the user in one query
     // Also filter based upon the provided collections
     let user_collections: HashMap<CollectionId, Collection> =

@@ -1878,8 +1968,16 @@ async fn post_bulk_collections(data: Json<BulkCollectionsData>, headers: Headers
         // Do not abort the operation just ignore it, it could be a cipher was just deleted for example
         if let Some(cipher) = Cipher::find_by_uuid_and_org(cipher_id, &data.organization_id, &mut conn).await {
             if cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await {
-                for collection in &data.collection_ids {
-                    CollectionCipher::save(&cipher.uuid, collection, &mut conn).await?;
-                }
+                // When selecting a specific collection from the left filter list, and use the bulk option, you can remove an item from that collection
+                // In these cases the client will call this endpoint twice, once for adding the new collections and a second for deleting.
+                if data.remove_collections {
+                    for collection in &data.collection_ids {
+                        CollectionCipher::delete(&cipher.uuid, collection, &mut conn).await?;
+                    }
+                } else {
+                    for collection in &data.collection_ids {
+                        CollectionCipher::save(&cipher.uuid, collection, &mut conn).await?;
+                    }
+                }
             }
         };
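Per the comment above, clients call this endpoint twice, once with `remove_collections: false` to attach and once with `true` to detach. A toy sketch of that toggle over a plain set standing in for the cipher-collection junction table:

```rust
use std::collections::HashSet;

fn apply_bulk(links: &mut HashSet<(String, String)>, cipher: &str, collections: &[&str], remove: bool) {
    for col in collections {
        let key = (cipher.to_string(), col.to_string());
        if remove {
            links.remove(&key); // second client call: detach
        } else {
            links.insert(key); // first client call: attach
        }
    }
}

fn main() {
    let mut links = HashSet::new();
    apply_bulk(&mut links, "cipher-1", &["col-a", "col-b"], false);
    apply_bulk(&mut links, "cipher-1", &["col-b"], true);
    assert!(links.contains(&("cipher-1".into(), "col-a".into())));
    assert!(!links.contains(&("cipher-1".into(), "col-b".into())));
    println!("{links:?}");
}
```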
@@ -2356,6 +2454,9 @@ async fn _revoke_member(
     headers: &AdminHeaders,
     conn: &mut DbConn,
 ) -> EmptyResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     match Membership::find_by_uuid_and_org(member_id, org_id, conn).await {
         Some(mut member) if member.status > MembershipStatus::Revoked as i32 => {
             if member.user_uuid == headers.user.uuid {

@@ -2463,6 +2564,9 @@ async fn _restore_member(
     headers: &AdminHeaders,
     conn: &mut DbConn,
 ) -> EmptyResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     match Membership::find_by_uuid_and_org(member_id, org_id, conn).await {
         Some(mut member) if member.status < MembershipStatus::Accepted as i32 => {
             if member.user_uuid == headers.user.uuid {

@@ -2510,18 +2614,27 @@ async fn _restore_member(
     Ok(())
 }

-#[get("/organizations/<org_id>/groups")]
-async fn get_groups(org_id: OrganizationId, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult {
+async fn get_groups_data(
+    details: bool,
+    org_id: OrganizationId,
+    headers: ManagerHeadersLoose,
+    mut conn: DbConn,
+) -> JsonResult {
     if org_id != headers.membership.org_uuid {
         err!("Organization not found", "Organization id's do not match");
     }
     let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
-        // Group::find_by_organization(&org_id, &mut conn).await.iter().map(Group::to_json).collect::<Value>()
         let groups = Group::find_by_organization(&org_id, &mut conn).await;
         let mut groups_json = Vec::with_capacity(groups.len());

-        for g in groups {
-            groups_json.push(g.to_json_details(&mut conn).await)
-        }
+        if details {
+            for g in groups {
+                groups_json.push(g.to_json_details(&mut conn).await)
+            }
+        } else {
+            for g in groups {
+                groups_json.push(g.to_json())
+            }
+        }
         groups_json
     } else {

@@ -2537,9 +2650,14 @@ async fn get_groups(org_id: OrganizationId, headers: ManagerHeadersLoose, mut co
     })))
 }

+#[get("/organizations/<org_id>/groups")]
+async fn get_groups(org_id: OrganizationId, headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult {
+    get_groups_data(false, org_id, headers, conn).await
+}
+
 #[get("/organizations/<org_id>/groups/details", rank = 1)]
 async fn get_groups_details(org_id: OrganizationId, headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult {
-    get_groups(org_id, headers, conn).await
+    get_groups_data(true, org_id, headers, conn).await
 }

 #[derive(Deserialize)]

@@ -2549,7 +2667,7 @@ struct GroupRequest {
     #[serde(default)]
     access_all: bool,
     external_id: Option<String>,
-    collections: Vec<SelectedCollection>,
+    collections: Vec<CollectionData>,
     users: Vec<MembershipId>,
 }

@@ -2570,14 +2688,14 @@ impl GroupRequest {

 #[derive(Deserialize, Serialize)]
 #[serde(rename_all = "camelCase")]
-struct SelectedCollection {
+struct CollectionData {
     id: CollectionId,
     read_only: bool,
     hide_passwords: bool,
     manage: bool,
 }

-impl SelectedCollection {
+impl CollectionData {
     pub fn to_collection_group(&self, groups_uuid: GroupId) -> CollectionGroup {
         CollectionGroup::new(self.id.clone(), groups_uuid, self.read_only, self.hide_passwords, self.manage)
     }

@@ -2601,6 +2719,9 @@ async fn post_groups(
     data: Json<GroupRequest>,
     mut conn: DbConn,
 ) -> JsonResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     if !CONFIG.org_groups_enabled() {
         err!("Group support is disabled");
     }

@@ -2630,6 +2751,9 @@ async fn put_group(
     headers: AdminHeaders,
     mut conn: DbConn,
 ) -> JsonResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     if !CONFIG.org_groups_enabled() {
         err!("Group support is disabled");
     }

@@ -2660,7 +2784,7 @@ async fn put_group(

 async fn add_update_group(
     mut group: Group,
-    collections: Vec<SelectedCollection>,
+    collections: Vec<CollectionData>,
     members: Vec<MembershipId>,
     org_id: OrganizationId,
     headers: &AdminHeaders,

@@ -2694,7 +2818,8 @@ async fn add_update_group(
         "organizationId": group.organizations_uuid,
         "name": group.name,
         "accessAll": group.access_all,
-        "externalId": group.external_id
+        "externalId": group.external_id,
+        "object": "group"
     })))
 }

@@ -2745,6 +2870,9 @@ async fn _delete_group(
     headers: &AdminHeaders,
     conn: &mut DbConn,
 ) -> EmptyResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     if !CONFIG.org_groups_enabled() {
         err!("Group support is disabled");
     }

@@ -2774,6 +2902,9 @@ async fn bulk_delete_groups(
     headers: AdminHeaders,
     mut conn: DbConn,
 ) -> EmptyResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     if !CONFIG.org_groups_enabled() {
         err!("Group support is disabled");
     }

@@ -2837,6 +2968,9 @@ async fn put_group_members(
     data: Json<Vec<MembershipId>>,
     mut conn: DbConn,
 ) -> EmptyResult {
+    if org_id != headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     if !CONFIG.org_groups_enabled() {
         err!("Group support is disabled");
     }

@@ -3021,7 +3155,7 @@ async fn get_organization_public_key(
     headers: OrgMemberHeaders,
     mut conn: DbConn,
 ) -> JsonResult {
-    if org_id != headers.org_id {
+    if org_id != headers.membership.org_uuid {
         err!("Organization not found", "Organization id's do not match");
     }
     let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else {

@@ -3035,7 +3169,7 @@ async fn get_organization_public_key(
 }

 // Obsolete - Renamed to public-key (2023.8), left for backwards compatibility with older clients
-// https://github.com/bitwarden/server/blob/25dc0c9178e3e3584074bbef0d4be827b7c89415/src/Api/AdminConsole/Controllers/OrganizationsController.cs#L463-L468
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Controllers/OrganizationsController.cs#L487-L492
 #[get("/organizations/<org_id>/keys")]
 async fn get_organization_keys(org_id: OrganizationId, headers: OrgMemberHeaders, conn: DbConn) -> JsonResult {
     get_organization_public_key(org_id, headers, conn).await

@@ -3086,7 +3220,7 @@ async fn put_reset_password(
     user.set_password(reset_request.new_master_password_hash.as_str(), Some(reset_request.key), true, None);
     user.save(&mut conn).await?;

-    nt.send_logout(&user, None).await;
+    nt.send_logout(&user, None, &mut conn).await;

     log_event(
         EventType::OrganizationUserAdminResetPassword as i32,

@@ -3126,16 +3260,16 @@ async fn get_reset_password_details(

     check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &mut conn).await?;

-    // https://github.com/bitwarden/server/blob/3b50ccb9f804efaacdc46bed5b60e5b28eddefcf/src/Api/Models/Response/Organizations/OrganizationUserResponseModel.cs#L111
+    // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Models/Response/Organizations/OrganizationUserResponseModel.cs#L190
     Ok(Json(json!({
         "object": "organizationUserResetPasswordDetails",
-        "kdf":user.client_kdf_type,
-        "kdfIterations":user.client_kdf_iter,
-        "kdfMemory":user.client_kdf_memory,
-        "kdfParallelism":user.client_kdf_parallelism,
-        "resetPasswordKey":member.reset_password_key,
-        "encryptedPrivateKey":org.private_key,
+        "organizationUserId": member_id,
+        "kdf": user.client_kdf_type,
+        "kdfIterations": user.client_kdf_iter,
+        "kdfMemory": user.client_kdf_memory,
+        "kdfParallelism": user.client_kdf_parallelism,
+        "resetPasswordKey": member.reset_password_key,
+        "encryptedPrivateKey": org.private_key,
     })))
 }

@@ -3191,13 +3325,17 @@ async fn put_reset_password_enrollment(

     let reset_request = data.into_inner();

-    if reset_request.reset_password_key.is_none()
-        && OrgPolicy::org_is_reset_password_auto_enroll(&org_id, &mut conn).await
-    {
+    let reset_password_key = match reset_request.reset_password_key {
+        None => None,
+        Some(ref key) if key.is_empty() => None,
+        Some(key) => Some(key),
+    };
+
+    if reset_password_key.is_none() && OrgPolicy::org_is_reset_password_auto_enroll(&org_id, &mut conn).await {
         err!("Reset password can't be withdrawn due to an enterprise policy");
     }

-    if reset_request.reset_password_key.is_some() {
+    if reset_password_key.is_some() {
         PasswordOrOtpData {
             master_password_hash: reset_request.master_password_hash,
             otp: reset_request.otp,

@@ -3206,7 +3344,7 @@ async fn put_reset_password_enrollment(
     .await?;
     }

-    member.reset_password_key = reset_request.reset_password_key;
+    member.reset_password_key = reset_password_key;
     member.save(&mut conn).await?;

     let log_id = if member.reset_password_key.is_some() {

@@ -3220,57 +3358,22 @@ async fn put_reset_password_enrollment(
     Ok(())
 }

-// This is a new function active since the v2022.9.x clients.
-// It combines the previous two calls done before.
-// We call those two functions here and combine them ourselves.
-//
 // NOTE: It seems clients can't handle uppercase-first keys!!
 // We need to convert all keys so they have the first character to be a lowercase.
 // Else the export will be just an empty JSON file.
+// We currently only support exports by members of the Admin or Owner status.
+// Vaultwarden does not yet support exporting only managed collections!
+// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/OrganizationExportController.cs#L52
 #[get("/organizations/<org_id>/export")]
-async fn get_org_export(
-    org_id: OrganizationId,
-    headers: AdminHeaders,
-    client_version: Option<ClientVersion>,
-    mut conn: DbConn,
-) -> JsonResult {
+async fn get_org_export(org_id: OrganizationId, headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
     if org_id != headers.org_id {
         err!("Organization not found", "Organization id's do not match");
     }
-    // Since version v2023.1.0 the format of the export is different.
-    // Also, this endpoint was created since v2022.9.0.
-    // Therefore, we will check for any version smaller then v2023.1.0 and return a different response.
-    // If we can't determine the version, we will use the latest default v2023.1.0 and higher.
-    // https://github.com/bitwarden/server/blob/9ca93381ce416454734418c3a9f99ab49747f1b6/src/Api/Controllers/OrganizationExportController.cs#L44
-    let use_list_response_model = if let Some(client_version) = client_version {
-        let ver_match = semver::VersionReq::parse("<2023.1.0").unwrap();
-        ver_match.matches(&client_version.0)
-    } else {
-        false
-    };
-
-    // Also both main keys here need to be lowercase, else the export will fail.
-    if use_list_response_model {
-        // Backwards compatible pre v2023.1.0 response
-        Ok(Json(json!({
-            "collections": {
-                "data": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
-                "object": "list",
-                "continuationToken": null,
-            },
-            "ciphers": {
-                "data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
-                "object": "list",
-                "continuationToken": null,
-            }
-        })))
-    } else {
-        // v2023.1.0 and newer response
-        Ok(Json(json!({
-            "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
-            "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
-        })))
-    }
+    Ok(Json(json!({
+        "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
+        "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
+    })))
 }

 async fn _api_key(

@@ -3280,6 +3383,9 @@ async fn _api_key(
     headers: AdminHeaders,
     mut conn: DbConn,
 ) -> JsonResult {
+    if org_id != &headers.org_id {
+        err!("Organization not found", "Organization id's do not match");
+    }
     let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
@@ -46,7 +46,7 @@ struct OrgImportData {
 #[post("/public/organization/import", data = "<data>")]
 async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
     // Most of the logic for this function can be found here
-    // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
+    // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/OrganizationService.cs#L1203

     let org_id = token.0;
     let data = data.into_inner();
@@ -1,7 +1,9 @@
use std::path::Path;
use std::time::Duration;

use chrono::{DateTime, TimeDelta, Utc};
use num_traits::ToPrimitive;
use once_cell::sync::Lazy;
use rocket::form::Form;
use rocket::fs::NamedFile;
use rocket::fs::TempFile;
@@ -11,12 +13,28 @@ use serde_json::Value;
use crate::{
    api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
    auth::{ClientIp, Headers, Host},
    config::PathType,
    db::{models::*, DbConn, DbPool},
    util::NumberOrString,
    util::{save_temp_file, NumberOrString},
    CONFIG,
};

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
static ANON_PUSH_DEVICE: Lazy<Device> = Lazy::new(|| {
    let dt = crate::util::parse_date("1970-01-01T00:00:00.000000Z");
    Device {
        uuid: String::from("00000000-0000-0000-0000-000000000000").into(),
        created_at: dt,
        updated_at: dt,
        user_uuid: String::from("00000000-0000-0000-0000-000000000000").into(),
        name: String::new(),
        atype: 14, // 14 == Unknown Browser
        push_uuid: Some(String::from("00000000-0000-0000-0000-000000000000").into()),
        push_token: None,
        refresh_token: String::new(),
        twofactor_remember: None,
    }
});

// The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
const SIZE_525_MB: i64 = 550_502_400;
@@ -182,7 +200,7 @@ async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt:
        UpdateType::SyncSendCreate,
        &send,
        &send.update_users_revision(&mut conn).await,
        &headers.device.uuid,
        &headers.device,
        &mut conn,
    )
    .await;
@@ -204,13 +222,15 @@ struct UploadDataV2<'f> {
// @deprecated Mar 25 2021: This method has been deprecated in favor of direct uploads (v2).
// This method still exists to support older clients, probably need to remove it sometime.
// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L164-L167
// 2025: This endpoint doesn't seem to exists anymore in the latest version
// See: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs
#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let UploadData {
        model,
        mut data,
        data,
    } = data.into_inner();
    let model = model.into_inner();

@@ -250,13 +270,8 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
    }

    let file_id = crate::crypto::generate_send_file_id();
    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
    let file_path = folder_path.join(&file_id);
    tokio::fs::create_dir_all(&folder_path).await?;

    if let Err(_err) = data.persist_to(&file_path).await {
        data.move_copy_to(file_path).await?
    }
    save_temp_file(PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?;
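The `save_temp_file` call replaces the open-coded persist logic the removed lines show: Rocket's `TempFile::persist_to` is a rename, which fails when the destination is on a different filesystem, so the code fell back to `move_copy_to`. A hedged sketch of that underlying pattern, assuming Rocket 0.5 and a caller-supplied destination path; the real helper also routes through OpenDAL:

use rocket::fs::TempFile;

// persist_to() renames the temp file, which fails across filesystems;
// move_copy_to() falls back to a copy-then-delete in that case.
async fn store_upload(mut data: TempFile<'_>, dest: &std::path::Path) -> std::io::Result<()> {
    if let Err(_e) = data.persist_to(dest).await {
        data.move_copy_to(dest).await?;
    }
    Ok(())
}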
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
@@ -272,7 +287,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
        UpdateType::SyncSendCreate,
        &send,
        &send.update_users_revision(&mut conn).await,
        &headers.device.uuid,
        &headers.device,
        &mut conn,
    )
    .await;
@@ -280,7 +295,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
    Ok(Json(send.to_json()))
}

// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs#L165
#[post("/sends/file/v2", data = "<data>")]
async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;
@@ -338,7 +353,7 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
    Ok(Json(json!({
        "fileUploadType": 0, // 0 == Direct | 1 == Azure
        "object": "send-fileUpload",
        "url": format!("/sends/{}/file/{}", send.uuid, file_id),
        "url": format!("/sends/{}/file/{file_id}", send.uuid),
        "sendResponse": send.to_json()
    })))
}
@@ -351,7 +366,7 @@ pub struct SendFileData {
    fileName: String,
}

// https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs#L195
#[post("/sends/<send_id>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
async fn post_send_file_v2_data(
    send_id: SendId,
@@ -363,7 +378,7 @@ async fn post_send_file_v2_data(
) -> EmptyResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let mut data = data.into_inner();
    let data = data.into_inner();

    let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
        err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
@@ -378,7 +393,11 @@ async fn post_send_file_v2_data(
    };

    match data.data.raw_name() {
        Some(raw_file_name) if raw_file_name.dangerous_unsafe_unsanitized_raw() == send_data.fileName => (),
        Some(raw_file_name)
            if raw_file_name.dangerous_unsafe_unsanitized_raw() == send_data.fileName
                // be less strict only if using CLI, cf. https://github.com/dani-garcia/vaultwarden/issues/5614
                || (headers.device.is_cli() && send_data.fileName.ends_with(raw_file_name.dangerous_unsafe_unsanitized_raw().as_str())
        ) => {}
        Some(raw_file_name) => err!(
            "Send file name does not match.",
            format!(
@@ -402,25 +421,15 @@ async fn post_send_file_v2_data(
        err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
    }

    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
    let file_path = folder_path.join(file_id);
    let file_path = format!("{send_id}/{file_id}");

    // Check if the file already exists, if that is the case do not overwrite it
    if tokio::fs::metadata(&file_path).await.is_ok() {
        err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
    }

    tokio::fs::create_dir_all(&folder_path).await?;

    if let Err(_err) = data.data.persist_to(&file_path).await {
        data.data.move_copy_to(file_path).await?
    }
    save_temp_file(PathType::Sends, &file_path, data.data, false).await?;

    nt.send_send_update(
        UpdateType::SyncSendCreate,
        &send,
        &send.update_users_revision(&mut conn).await,
        &headers.device.uuid,
        &headers.device,
        &mut conn,
    )
    .await;
@@ -485,7 +494,7 @@ async fn post_access(
        UpdateType::SyncSendUpdate,
        &send,
        &send.update_users_revision(&mut conn).await,
        &String::from("00000000-0000-0000-0000-000000000000").into(),
        &ANON_PUSH_DEVICE,
        &mut conn,
    )
    .await;
@@ -542,20 +551,31 @@ async fn post_access_file(
        UpdateType::SyncSendUpdate,
        &send,
        &send.update_users_revision(&mut conn).await,
        &String::from("00000000-0000-0000-0000-000000000000").into(),
        &ANON_PUSH_DEVICE,
        &mut conn,
    )
    .await;

    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
    let token = crate::auth::encode_jwt(&token_claims);
    Ok(Json(json!({
        "object": "send-fileDownload",
        "id": file_id,
        "url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
        "url": download_url(&host, &send_id, &file_id).await?,
    })))
}

async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Result<String, crate::Error> {
    let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;

    if operator.info().scheme() == opendal::Scheme::Fs {
        let token_claims = crate::auth::generate_send_claims(send_id, file_id);
        let token = crate::auth::encode_jwt(&token_claims);

        Ok(format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host))
    } else {
        Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
    }
}
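The branch on `opendal::Scheme::Fs` above is the crux of the change: local files keep the JWT-protected download route, while remote backends hand out a time-limited presigned URL so the server never proxies the bytes itself. In isolation, the presign call looks like this (assuming an `opendal::Operator` already configured for the backing store):

use std::time::Duration;
use opendal::Operator;

// Returns a URL the client can GET for the next five minutes, without the
// server having to stream the blob contents through itself.
async fn presigned_download_url(op: &Operator, key: &str) -> opendal::Result<String> {
    let req = op.presign_read(key, Duration::from_secs(5 * 60)).await?;
    Ok(req.uri().to_string())
}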

#[get("/sends/<send_id>/<file_id>?<t>")]
async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
    if let Ok(claims) = crate::auth::decode_send(t) {
@@ -641,7 +661,7 @@ pub async fn update_send_from_data(

    send.save(conn).await?;
    if ut != UpdateType::None {
        nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
        nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device, conn).await;
    }
    Ok(())
}
@@ -657,7 +677,7 @@ async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: No
        UpdateType::SyncSendDelete,
        &send,
        &send.update_users_revision(&mut conn).await,
        &headers.device.uuid,
        &headers.device,
        &mut conn,
    )
    .await;
@@ -679,7 +699,7 @@ async fn put_remove_password(send_id: SendId, headers: Headers, mut conn: DbConn
        UpdateType::SyncSendUpdate,
        &send,
        &send.update_users_revision(&mut conn).await,
        &headers.device.uuid,
        &headers.device,
        &mut conn,
    )
    .await;
@@ -34,6 +34,10 @@ async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers,
        _ => (false, crypto::encode_random_bytes::<20>(BASE32)),
    };

    // Upstream seems to also return `userVerificationToken`, but doesn't seem to be used at all.
    // It should help prevent TOTP disclosure if someone keeps their vault unlocked.
    // Since it doesn't seem to be used, and also does not cause any issues, lets leave it out of the response.
    // See: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Auth/Controllers/TwoFactorController.cs#L94
    Ok(Json(json!({
        "enabled": enabled,
        "key": key,
@@ -148,7 +152,7 @@ pub async fn validate_totp_code(
    if generated == totp_code && time_step > twofactor.last_used {
        // If the step does not equals 0 the time is drifted either server or client side.
        if step != 0 {
            warn!("TOTP Time drift detected. The step offset is {}", step);
            warn!("TOTP Time drift detected. The step offset is {step}");
        }

        // Save the last used time step so only totp time steps higher then this one are allowed.
@@ -157,7 +161,7 @@ pub async fn validate_totp_code(
        twofactor.save(conn).await?;
        return Ok(());
    } else if generated == totp_code && time_step <= twofactor.last_used {
        warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
        warn!("This TOTP or a TOTP code within {steps} steps back or forward has already been used!");
        err!(
            format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
            ErrorEvent {
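The replay protection in `validate_totp_code` compares 30-second time steps rather than raw codes: a code is accepted only if its step is strictly newer than the stored `last_used` step. A simplified sketch of just that bookkeeping (the TOTP generation itself is elided; names are illustrative):

// One TOTP time step is conventionally 30 seconds of Unix time.
fn time_step(unix_secs: i64) -> i64 {
    unix_secs / 30
}

// Accept a code for step `candidate` only if it is newer than the last
// accepted step; this rejects replays of a just-used code even inside
// the +/- drift window.
fn accept_step(candidate: i64, last_used: &mut i64) -> bool {
    if candidate > *last_used {
        *last_used = candidate;
        true
    } else {
        false
    }
}

fn main() {
    let mut last_used = 0;
    let now = 1_700_000_000;
    assert!(accept_step(time_step(now), &mut last_used));
    // The same code replayed within the same 30s window is refused.
    assert!(!accept_step(time_step(now + 5), &mut last_used));
}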
@@ -118,6 +118,9 @@ async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbCo
    } else {
        json!({
            "enabled": enabled,
            "host": null,
            "clientSecret": null,
            "clientId": null,
            "object": "twoFactorDuo"
        })
    };
@@ -202,7 +205,7 @@ async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData)
    use std::str::FromStr;

    // https://duo.com/docs/authapi#api-details
    let url = format!("https://{}{}", &data.host, path);
    let url = format!("https://{}{path}", &data.host);
    let date = Utc::now().to_rfc2822();
    let username = &data.ik;
    let fields = [&date, method, &data.host, path, params];
@@ -258,7 +261,7 @@ pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiRes
    }
    .map_res("Can't fetch Duo Keys")?;

    Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
    Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host))
}

pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {
@@ -274,9 +277,9 @@ pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult

fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
    let val = format!("{email}|{ikey}|{expire}");
    let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
    let cookie = format!("{prefix}|{}", BASE64.encode(val.as_bytes()));

    format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
    format!("{cookie}|{}", crypto::hmac_sign(key, &cookie))
}

pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
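`sign_duo_values` builds the classic Duo Web SDK cookie: the value `email|ikey|expire` is base64-wrapped under a prefix, and the whole cookie is HMAC-signed with the signature appended. A standalone sketch of the same shape; HMAC-SHA1 via `ring` and a hex MAC encoding are assumptions here (Duo's classic SDK used SHA1), while the real code delegates to `crate::crypto::hmac_sign`:

use data_encoding::{BASE64, HEXLOWER};
use ring::hmac;

// Same shape as sign_duo_values: "prefix|base64(email|ikey|expire)" is MACed
// and the MAC appended after a final '|'.
fn sign(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
    let val = format!("{email}|{ikey}|{expire}");
    let cookie = format!("{prefix}|{}", BASE64.encode(val.as_bytes()));
    let mac_key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, key.as_bytes());
    let tag = hmac::sign(&mac_key, cookie.as_bytes());
    format!("{cookie}|{}", HEXLOWER.encode(tag.as_ref()))
}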
@@ -21,7 +21,7 @@ use url::Url;

// The location on this service that Duo should redirect users to. For us, this is a bridge
// built in to the Bitwarden clients.
// See: https://github.com/bitwarden/clients/blob/main/apps/web/src/connectors/duo-redirect.ts
// See: https://github.com/bitwarden/clients/blob/5fb46df3415aefced0b52f2db86c873962255448/apps/web/src/connectors/duo-redirect.ts
const DUO_REDIRECT_LOCATION: &str = "duo-redirect-connector.html";

// Number of seconds that a JWT we generate for Duo should be valid for.
@@ -182,7 +182,7 @@ impl DuoClient {
            HealthCheckResponse::HealthFail {
                message,
                message_detail,
            } => err!(format!("Duo health check FAIL response, msg: {}, detail: {}", message, message_detail)),
            } => err!(format!("Duo health check FAIL response, msg: {message}, detail: {message_detail}")),
        };

        if health_stat != "OK" {
@@ -275,7 +275,7 @@ impl DuoClient {

        let status_code = res.status();
        if status_code != StatusCode::OK {
            err!(format!("Failure response from Duo: {}", status_code))
            err!(format!("Failure response from Duo: {status_code}"))
        }

        let response: IdTokenResponse = match res.json::<IdTokenResponse>().await {
@@ -478,7 +478,7 @@ pub async fn validate_duo_login(
        Err(e) => return Err(e),
    };

    let d: Digest = digest(&SHA512_256, format!("{}{}", ctx.nonce, device_identifier).as_bytes());
    let d: Digest = digest(&SHA512_256, format!("{}{device_identifier}", ctx.nonce).as_bytes());
    let hash: String = HEXLOWER.encode(d.as_ref());

    match client.exchange_authz_code_for_result(code, email, hash.as_str()).await {
@@ -197,14 +197,20 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
}

/// Validate the email code when used as TwoFactor token mechanism
pub async fn validate_email_code_str(user_id: &UserId, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
pub async fn validate_email_code_str(
    user_id: &UserId,
    token: &str,
    data: &str,
    ip: &std::net::IpAddr,
    conn: &mut DbConn,
) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Email as i32, conn)
        .await
        .map_res("Two factor not found")?;
    let Some(issued_token) = &email_data.last_token else {
        err!(
            "No token available",
            format!("No token available! IP: {ip}"),
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
@@ -220,7 +226,7 @@ pub async fn validate_email_code_str(user_id: &UserId, token: &str, data: &str,
        twofactor.save(conn).await?;

        err!(
            "Token is invalid",
            format!("Token is invalid! IP: {ip}"),
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
@@ -323,7 +329,7 @@ pub fn obscure_email(email: &str) -> String {
        }
    };

    format!("{}@{}", new_name, &domain)
    format!("{new_name}@{domain}")
}

pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
@@ -145,15 +145,14 @@ async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut c

    // Ensure they are valid OTPs
    for yubikey in &yubikeys {
        if yubikey.len() == 12 {
            // YubiKey ID
        if yubikey.is_empty() || yubikey.len() == 12 {
            continue;
        }

        verify_yubikey_otp(yubikey.to_owned()).await.map_res("Invalid Yubikey OTP provided")?;
    }

    let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();
    let yubikey_ids: Vec<String> = yubikeys.into_iter().filter_map(|x| x.get(..12).map(str::to_owned)).collect();

    let yubikey_metadata = YubikeyMetadata {
        keys: yubikey_ids,
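The switch from `x[..12]` to `x.get(..12)` is the substantive fix here: indexing panics when an entry is shorter than 12 bytes, while `get` returns `None` and lets `filter_map` drop the entry. In isolation:

fn main() {
    let yubikeys = vec!["ccccccccccccappend".to_string(), "".to_string()];

    // `x[..12]` would panic on the empty string; `get(..12)` skips it instead.
    let ids: Vec<String> = yubikeys.into_iter().filter_map(|x| x.get(..12).map(str::to_owned)).collect();

    assert_eq!(ids, vec!["cccccccccccc".to_string()]);
}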
141 src/api/icons.rs
@@ -14,14 +14,12 @@ use reqwest::{
    Client, Response,
};
use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
    fs::{create_dir_all, remove_file, symlink_metadata, File},
    io::{AsyncReadExt, AsyncWriteExt},
};
use svg_hush::{data_url_filter, Filter};

use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};

use crate::{
    config::PathType,
    error::Error,
    http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
    util::Cached,
@@ -38,11 +36,29 @@ pub fn routes() -> Vec<Route> {
static CLIENT: Lazy<Client> = Lazy::new(|| {
    // Generate the default headers
    let mut default_headers = HeaderMap::new();
    default_headers.insert(header::USER_AGENT, HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
    default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
    default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en,*;q=0.1"));
    default_headers.insert(
        header::USER_AGENT,
        HeaderValue::from_static(
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
        ),
    );
    default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"));
    default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en-US,en;q=0.9"));
    default_headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache"));
    default_headers.insert(header::PRAGMA, HeaderValue::from_static("no-cache"));
    default_headers.insert(header::UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1"));

    default_headers.insert("Sec-Ch-Ua-Mobile", HeaderValue::from_static("?0"));
    default_headers.insert("Sec-Ch-Ua-Platform", HeaderValue::from_static("Linux"));
    default_headers.insert(
        "Sec-Ch-Ua",
        HeaderValue::from_static("\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\""),
    );

    default_headers.insert("Sec-Fetch-Site", HeaderValue::from_static("none"));
    default_headers.insert("Sec-Fetch-Mode", HeaderValue::from_static("navigate"));
    default_headers.insert("Sec-Fetch-User", HeaderValue::from_static("?1"));
    default_headers.insert("Sec-Fetch-Dest", HeaderValue::from_static("document"));

    // Generate the cookie store
    let cookie_store = Arc::new(Jar::default());
@@ -56,6 +72,7 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
        .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
        .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
        .default_headers(default_headers.clone())
        .http1_title_case_headers()
        .build()
        .expect("Failed to build client")
});
@@ -63,15 +80,18 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
// Build Regex only once since this takes a lot of time.
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

// The function name `icon_external` is checked in the `on_response` function in `AppHeaders`
// It is used to prevent sending a specific header which breaks icon downloads.
// If this function needs to be renamed, also adjust the code in `util.rs`
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Option<Redirect> {
    if !is_valid_domain(domain) {
        warn!("Invalid domain: {}", domain);
        warn!("Invalid domain: {domain}");
        return None;
    }

    if should_block_address(domain) {
        warn!("Blocked address: {}", domain);
        warn!("Blocked address: {domain}");
        return None;
    }

@@ -93,7 +113,7 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

    if !is_valid_domain(domain) {
        warn!("Invalid domain: {}", domain);
        warn!("Invalid domain: {domain}");
        return Cached::ttl(
            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
@@ -102,7 +122,7 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
    }

    if should_block_address(domain) {
        warn!("Blocked address: {}", domain);
        warn!("Blocked address: {domain}");
        return Cached::ttl(
            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
@@ -127,7 +147,7 @@ fn is_valid_domain(domain: &str) -> bool {

    // If parsing the domain fails using Url, it will not work with reqwest.
    if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
        debug!("Domain parse error: '{domain}' - {parse_error:?}");
        return false;
    } else if domain.is_empty()
        || domain.contains("..")
@@ -136,18 +156,17 @@ fn is_valid_domain(domain: &str) -> bool {
        || domain.ends_with('-')
    {
        debug!(
            "Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'",
            domain
            "Domain validation error: '{domain}' is either empty, contains '..', starts with an '.', starts or ends with a '-'"
        );
        return false;
    } else if domain.len() > 255 {
        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
        debug!("Domain validation error: '{domain}' exceeds 255 characters");
        return false;
    }

    for c in domain.chars() {
        if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
            debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
            debug!("Domain validation error: '{domain}' contains an invalid character '{c}'");
            return false;
        }
    }
@@ -156,7 +175,7 @@ fn is_valid_domain(domain: &str) -> bool {
}

async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
    let path = format!("{domain}.png");

    // Check for expiration of negatively cached copy
    if icon_is_negcached(&path).await {
@@ -164,10 +183,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
    }

    if let Some(icon) = get_cached_icon(&path).await {
        let icon_type = match get_icon_type(&icon) {
            Some(x) => x,
            _ => "x-icon",
        };
        let icon_type = get_icon_type(&icon).unwrap_or("x-icon");
        return Some((icon, icon_type.to_string()));
    }

@@ -178,7 +194,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
    // Get the icon, or None in case of error
    match download_icon(domain).await {
        Ok((icon, icon_type)) => {
            save_icon(&path, &icon).await;
            save_icon(&path, icon.to_vec()).await;
            Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
        }
        Err(e) => {
@@ -189,9 +205,9 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
                return None;
            }

            warn!("Unable to download icon: {:?}", e);
            warn!("Unable to download icon: {e:?}");
            let miss_indicator = path + ".miss";
            save_icon(&miss_indicator, &[]).await;
            save_icon(&miss_indicator, vec![]).await;
            None
        }
    }
@@ -204,11 +220,9 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
    }

    // Try to read the cached icon, and return it if it exists
    if let Ok(mut f) = File::open(path).await {
        let mut buffer = Vec::new();

        if f.read_to_end(&mut buffer).await.is_ok() {
            return Some(buffer);
    if let Ok(operator) = CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
        if let Ok(buf) = operator.read(path).await {
            return Some(buf.to_vec());
        }
    }

@@ -216,9 +230,11 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
}

async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
    let meta = symlink_metadata(path).await?;
    let modified = meta.modified()?;
    let age = SystemTime::now().duration_since(modified)?;
    let operator = CONFIG.opendal_operator_for_path_type(PathType::IconCache)?;
    let meta = operator.stat(path).await?;
    let modified =
        meta.last_modified().ok_or_else(|| std::io::Error::other(format!("No last modified time for `{path}`")))?;
    let age = SystemTime::now().duration_since(modified.into())?;

    Ok(ttl > 0 && ttl <= age.as_secs())
}
@@ -230,8 +246,13 @@ async fn icon_is_negcached(path: &str) -> bool {
    match expired {
        // No longer negatively cached, drop the marker
        Ok(true) => {
            if let Err(e) = remove_file(&miss_indicator).await {
                error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
            match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
                Ok(operator) => {
                    if let Err(e) = operator.delete(&miss_indicator).await {
                        error!("Could not remove negative cache indicator for icon {path:?}: {e:?}");
                    }
                }
                Err(e) => error!("Could not remove negative cache indicator for icon {path:?}: {e:?}"),
            }
            false
        }
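The `.miss` indicator is the whole negative-cache mechanism: an empty marker object stored next to where the icon would live, whose age is compared against `icon_cache_negttl`. A filesystem-only sketch of the idea (paths and TTL values are illustrative; the diff does the same through the OpenDAL operator):

use std::fs;
use std::time::{Duration, SystemTime};

// A domain is "negatively cached" while its .miss marker is younger than the TTL.
fn is_negcached(marker: &std::path::Path, ttl: Duration) -> bool {
    match fs::metadata(marker).and_then(|m| m.modified()) {
        Ok(modified) => match SystemTime::now().duration_since(modified) {
            Ok(age) => age < ttl,
            Err(_) => true, // clock skew: treat a future timestamp as fresh
        },
        Err(_) => false, // no marker, no negative cache entry
    }
}

fn main() -> std::io::Result<()> {
    let marker = std::env::temp_dir().join("example.com.png.miss");
    fs::write(&marker, [])?; // record a failed download
    assert!(is_negcached(&marker, Duration::from_secs(3600)));
    fs::remove_file(&marker)?; // drop the marker once it expires
    assert!(!is_negcached(&marker, Duration::from_secs(3600)));
    Ok(())
}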
@@ -317,7 +338,7 @@ struct IconUrlResult {

/// Returns a IconUrlResult which holds a Vector IconList and a string which holds the referer.
/// There will always two items within the iconlist which holds http(s)://domain.tld/favicon.ico.
/// This does not mean that that location does exists, but it is the default location browser use.
/// This does not mean that location exists, but (it) is the default location the browser uses.
///
/// # Argument
/// * `domain` - A string which holds the domain with extension.
@@ -531,10 +552,10 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                // Check if the icon type is allowed, else try an icon from the list.
                icon_type = get_icon_type(&body);
                if icon_type.is_none() {
                    debug!("Icon from {} data:image uri, is not a valid image type", domain);
                    debug!("Icon from {domain} data:image uri, is not a valid image type");
                    continue;
                }
                info!("Extracted icon from data:image uri for {}", domain);
                info!("Extracted icon from data:image uri for {domain}");
                buffer = body.freeze();
                break;
            }
@@ -560,26 +581,46 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {

    if buffer.is_empty() {
        err_silent!("Empty response or unable find a valid icon", domain);
    } else if icon_type == Some("svg+xml") {
        let mut svg_filter = Filter::new();
        svg_filter.set_data_url_filter(data_url_filter::allow_standard_images);
        let mut sanitized_svg = Vec::new();
        if svg_filter.filter(&*buffer, &mut sanitized_svg).is_err() {
            icon_type = None;
            buffer.clear();
        } else {
            buffer = sanitized_svg.into();
        }
    }

    Ok((buffer, icon_type))
}

async fn save_icon(path: &str, icon: &[u8]) {
    match File::create(path).await {
        Ok(mut f) => {
            f.write_all(icon).await.expect("Error writing icon file");
        }
        Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
            create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
        }
async fn save_icon(path: &str, icon: Vec<u8>) {
    let operator = match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
        Ok(operator) => operator,
        Err(e) => {
            warn!("Unable to save icon: {:?}", e);
            warn!("Failed to get OpenDAL operator while saving icon: {e}");
            return;
        }
    };

    if let Err(e) = operator.write(path, icon).await {
        warn!("Unable to save icon: {e:?}");
    }
}

fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
    fn check_svg_after_xml_declaration(bytes: &[u8]) -> Option<&'static str> {
        // Look for SVG tag within the first 1KB
        if let Ok(content) = std::str::from_utf8(&bytes[..bytes.len().min(1024)]) {
            if content.contains("<svg") || content.contains("<SVG") {
                return Some("svg+xml");
            }
        }
        None
    }

    match bytes {
        [137, 80, 78, 71, ..] => Some("png"),
        [0, 0, 1, 0, ..] => Some("x-icon"),
@@ -587,6 +628,8 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
        [255, 216, 255, ..] => Some("jpeg"),
        [71, 73, 70, 56, ..] => Some("gif"),
        [66, 77, ..] => Some("bmp"),
        [60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
        [60, 63, 120, 109, 108, ..] => check_svg_after_xml_declaration(bytes), // An svg starting with <?xml
        _ => None,
    }
}
@@ -598,6 +641,12 @@ async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes,
    let mut buf = BytesMut::new();
    let mut size = 0;
    while let Some(chunk) = stream.next().await {
        // It is possible that there might occure UnexpectedEof errors or others
        // This is most of the time no issue, and if there is no chunked data anymore or at all parsing the HTML will not happen anyway.
        // Therfore if chunk is an err, just break and continue with the data be have received.
        if chunk.is_err() {
            break;
        }
        let chunk = &chunk?;
        size += chunk.len();
        buf.extend(chunk);
@@ -14,21 +14,27 @@ use crate::{
        log_user_event,
        two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
    },
    master_password_policy,
    push::register_push_device,
    ApiResult, EmptyResult, JsonResult,
},
auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp, ClientVersion},
db::{models::*, DbConn},
error::MapResult,
mail, util, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![login, prelogin, identity_register]
    routes![login, prelogin, identity_register, register_verification_email, register_finish]
}

#[post("/connect/token", data = "<data>")]
async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult {
async fn login(
    data: Form<ConnectData>,
    client_header: ClientHeaders,
    client_version: Option<ClientVersion>,
    mut conn: DbConn,
) -> JsonResult {
    let data: ConnectData = data.into_inner();

    let mut user_id: Option<UserId> = None;
@@ -48,7 +54,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
    _check_is_some(&data.device_name, "device_name cannot be blank")?;
    _check_is_some(&data.device_type, "device_type cannot be blank")?;

    _password_login(data, &mut user_id, &mut conn, &client_header.ip).await
    _password_login(data, &mut user_id, &mut conn, &client_header.ip, &client_version).await
}
"client_credentials" => {
    _check_is_some(&data.client_id, "client_id cannot be blank")?;
@@ -112,7 +118,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
    // ---
    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec, data.client_id);
    device.save(conn).await?;

    let result = json!({
@@ -127,23 +133,12 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
    Ok(Json(result))
}

#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct MasterPasswordPolicy {
    min_complexity: u8,
    min_length: u32,
    require_lower: bool,
    require_upper: bool,
    require_numbers: bool,
    require_special: bool,
    enforce_on_login: bool,
}

async fn _password_login(
    data: ConnectData,
    user_id: &mut Option<UserId>,
    conn: &mut DbConn,
    ip: &ClientIp,
    client_version: &Option<ClientVersion>,
) -> JsonResult {
    // Validate scope
    let scope = data.scope.as_ref().unwrap();
@@ -158,7 +153,7 @@ async fn _password_login(
    // Get the user
    let username = data.username.as_ref().unwrap().trim();
    let Some(mut user) = User::find_by_mail(username, conn).await else {
        err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
        err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {username}.", ip.ip))
    };

    // Set the user_id here to be passed back used for event logging.
@@ -168,7 +163,7 @@ async fn _password_login(
    if !user.enabled {
        err!(
            "This user has been disabled",
            format!("IP: {}. Username: {}.", ip.ip, username),
            format!("IP: {}. Username: {username}.", ip.ip),
            ErrorEvent {
                event: EventType::UserFailedLogIn
            }
@@ -182,7 +177,7 @@ async fn _password_login(
    let Some(auth_request) = AuthRequest::find_by_uuid_and_user(auth_request_id, &user.uuid, conn).await else {
        err!(
            "Auth request not found. Try again.",
            format!("IP: {}. Username: {}.", ip.ip, username),
            format!("IP: {}. Username: {username}.", ip.ip),
            ErrorEvent {
                event: EventType::UserFailedLogIn,
            }
@@ -200,7 +195,7 @@ async fn _password_login(
    {
        err!(
            "Username or access code is incorrect. Try again",
            format!("IP: {}. Username: {}.", ip.ip, username),
            format!("IP: {}. Username: {username}.", ip.ip),
            ErrorEvent {
                event: EventType::UserFailedLogIn,
            }
@@ -209,7 +204,7 @@ async fn _password_login(
    } else if !user.check_valid_password(password) {
        err!(
            "Username or password is incorrect. Try again",
            format!("IP: {}. Username: {}.", ip.ip, username),
            format!("IP: {}. Username: {username}.", ip.ip),
            ErrorEvent {
                event: EventType::UserFailedLogIn,
            }
@@ -222,7 +217,7 @@ async fn _password_login(
    user.set_password(password, None, false, None);

    if let Err(e) = user.save(conn).await {
        error!("Error updating user: {:#?}", e);
        error!("Error updating user: {e:#?}");
    }
}

@@ -241,11 +236,11 @@ async fn _password_login(
    user.login_verify_count += 1;

    if let Err(e) = user.save(conn).await {
        error!("Error updating user: {:#?}", e);
        error!("Error updating user: {e:#?}");
    }

    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
        error!("Error auto-sending email verification email: {:#?}", e);
        error!("Error auto-sending email verification email: {e:#?}");
    }
}
}
@@ -253,7 +248,7 @@ async fn _password_login(
    // We still want the login to fail until they actually verified the email address
    err!(
        "Please verify your email before trying again.",
        format!("IP: {}. Username: {}.", ip.ip, username),
        format!("IP: {}. Username: {username}.", ip.ip),
        ErrorEvent {
            event: EventType::UserFailedLogIn
        }
@@ -262,11 +257,11 @@ async fn _password_login(

    let (mut device, new_device) = get_device(&data, conn, &user).await;

    let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
    let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, client_version, conn).await?;

    if CONFIG.mail_enabled() && new_device {
        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
            error!("Error sending new device email: {:#?}", e);
            error!("Error sending new device email: {e:#?}");

            if CONFIG.require_device_email() {
                err!(
@@ -291,38 +286,10 @@ async fn _password_login(
    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
    // ---
    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec, data.client_id);
    device.save(conn).await?;

    // Fetch all valid Master Password Policies and merge them into one with all true's and larges numbers as one policy
    let master_password_policies: Vec<MasterPasswordPolicy> =
        OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
            &user.uuid,
            OrgPolicyType::MasterPassword,
            conn,
        )
        .await
        .into_iter()
        .filter_map(|p| serde_json::from_str(&p.data).ok())
        .collect();

    let master_password_policy = if !master_password_policies.is_empty() {
        let mut mpp_json = json!(master_password_policies.into_iter().reduce(|acc, policy| {
            MasterPasswordPolicy {
                min_complexity: acc.min_complexity.max(policy.min_complexity),
                min_length: acc.min_length.max(policy.min_length),
                require_lower: acc.require_lower || policy.require_lower,
                require_upper: acc.require_upper || policy.require_upper,
                require_numbers: acc.require_numbers || policy.require_numbers,
                require_special: acc.require_special || policy.require_special,
                enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
            }
        }));
        mpp_json["object"] = json!("masterPasswordPolicy");
        mpp_json
    } else {
        json!({"object": "masterPasswordPolicy"})
    };
    let master_password_policy = master_password_policy(&user, conn).await;

    let mut result = json!({
        "access_token": access_token,
@@ -352,7 +319,7 @@ async fn _password_login(
    result["TwoFactorToken"] = Value::String(token);
}

    info!("User {} logged in successfully. IP: {}", username, ip.ip);
    info!("User {username} logged in successfully. IP: {}", ip.ip);
    Ok(Json(result))
}

@@ -420,7 +387,7 @@ async fn _user_api_key_login(
    if CONFIG.mail_enabled() && new_device {
        let now = Utc::now().naive_utc();
        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
            error!("Error sending new device email: {:#?}", e);
            error!("Error sending new device email: {e:#?}");

            if CONFIG.require_device_email() {
                err!(
@@ -441,7 +408,7 @@ async fn _user_api_key_login(
    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
    // ---
    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec, data.client_id);
    device.save(conn).await?;

    info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
@@ -520,6 +487,7 @@ async fn twofactor_auth(
    data: &ConnectData,
    device: &mut Device,
    ip: &ClientIp,
    client_version: &Option<ClientVersion>,
    conn: &mut DbConn,
) -> ApiResult<Option<String>> {
    let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
@@ -538,7 +506,10 @@ async fn twofactor_auth(
    let twofactor_code = match data.two_factor_token {
        Some(ref code) => code,
        None => {
            err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?, "2FA token not provided")
            err_json!(
                _json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
                "2FA token not provided"
            )
        }
    };

@@ -575,7 +546,7 @@ async fn twofactor_auth(
        }
    }
    Some(TwoFactorType::Email) => {
        email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
        email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, &ip.ip, conn).await?
    }

    Some(TwoFactorType::Remember) => {
@@ -585,7 +556,7 @@ async fn twofactor_auth(
    }
    _ => {
        err_json!(
            _json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?,
            _json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
            "2FA Remember token not provided"
        )
    }
@@ -617,6 +588,7 @@ async fn _json_err_twofactor(
    providers: &[i32],
    user_id: &UserId,
    data: &ConnectData,
    client_version: &Option<ClientVersion>,
    conn: &mut DbConn,
) -> ApiResult<Value> {
    let mut result = json!({
@@ -689,8 +661,16 @@ async fn _json_err_twofactor(
        err!("No twofactor email registered")
    };

    // Send email immediately if email is the only 2FA option
    if providers.len() == 1 {
    // Starting with version 2025.5.0 the client will call `/api/two-factor/send-email-login`.
    let disabled_send = if let Some(cv) = client_version {
        let ver_match = semver::VersionReq::parse(">=2025.5.0").unwrap();
        ver_match.matches(&cv.0)
    } else {
        false
    };

    // Send email immediately if email is the only 2FA option.
    if providers.len() == 1 && !disabled_send {
        email::send_token(user_id, conn).await?
    }

|
||||
|
||||
#[post("/accounts/register", data = "<data>")]
|
||||
async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
|
||||
_register(data, conn).await
|
||||
_register(data, false, conn).await
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct RegisterVerificationData {
|
||||
email: String,
|
||||
name: Option<String>,
|
||||
// receiveMarketingEmails: bool,
|
||||
}
|
||||
|
||||
#[derive(rocket::Responder)]
|
||||
enum RegisterVerificationResponse {
|
||||
NoContent(()),
|
||||
Token(Json<String>),
|
||||
}
|
||||
|
||||
#[post("/accounts/register/send-verification-email", data = "<data>")]
|
||||
async fn register_verification_email(
|
||||
data: Json<RegisterVerificationData>,
|
||||
mut conn: DbConn,
|
||||
) -> ApiResult<RegisterVerificationResponse> {
|
||||
let data = data.into_inner();
|
||||
|
||||
// the registration can only continue if signup is allowed or there exists an invitation
|
||||
if !(CONFIG.is_signup_allowed(&data.email)
|
||||
|| (!CONFIG.mail_enabled() && Invitation::find_by_mail(&data.email, &mut conn).await.is_some()))
|
||||
{
|
||||
err!("Registration not allowed or user already exists")
|
||||
}
|
||||
|
||||
let should_send_mail = CONFIG.mail_enabled() && CONFIG.signups_verify();
|
||||
|
||||
let token_claims =
|
||||
crate::auth::generate_register_verify_claims(data.email.clone(), data.name.clone(), should_send_mail);
|
||||
let token = crate::auth::encode_jwt(&token_claims);
|
||||
|
||||
if should_send_mail {
|
||||
let user = User::find_by_mail(&data.email, &mut conn).await;
|
||||
if user.filter(|u| u.private_key.is_some()).is_some() {
|
||||
// There is still a timing side channel here in that the code
|
||||
// paths that send mail take noticeably longer than ones that
|
||||
// don't. Add a randomized sleep to mitigate this somewhat.
|
||||
use rand::{rngs::SmallRng, Rng, SeedableRng};
|
||||
let mut rng = SmallRng::from_os_rng();
|
||||
let delta: i32 = 100;
|
||||
let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
|
||||
} else {
|
||||
mail::send_register_verify_email(&data.email, &token).await?;
|
||||
}
|
||||
|
||||
Ok(RegisterVerificationResponse::NoContent(()))
|
||||
} else {
|
||||
// If email verification is not required, return the token directly
|
||||
// the clients will use this token to finish the registration
|
||||
Ok(RegisterVerificationResponse::Token(Json(token)))
|
||||
}
|
||||
}
|
||||
|

#[post("/accounts/register/finish", data = "<data>")]
async fn register_finish(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
    _register(data, true, conn).await
}

// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts

@@ -32,7 +32,10 @@ pub use crate::api::{
    web::routes as web_routes,
    web::static_files,
};
use crate::db::{models::User, DbConn};
use crate::db::{
    models::{OrgPolicy, OrgPolicyType, User},
    DbConn,
};

// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
@@ -68,3 +71,49 @@ impl PasswordOrOtpData {
        Ok(())
    }
}

#[derive(Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MasterPasswordPolicy {
    min_complexity: Option<u8>,
    min_length: Option<u32>,
    require_lower: bool,
    require_upper: bool,
    require_numbers: bool,
    require_special: bool,
    enforce_on_login: bool,
}

// Fetch all valid Master Password Policies and merge them into one with all trues and largest numbers as one policy
async fn master_password_policy(user: &User, conn: &DbConn) -> Value {
    let master_password_policies: Vec<MasterPasswordPolicy> =
        OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
            &user.uuid,
            OrgPolicyType::MasterPassword,
            conn,
        )
        .await
        .into_iter()
        .filter_map(|p| serde_json::from_str(&p.data).ok())
        .collect();

    let mut mpp_json = if !master_password_policies.is_empty() {
        json!(master_password_policies.into_iter().reduce(|acc, policy| {
            MasterPasswordPolicy {
                min_complexity: acc.min_complexity.max(policy.min_complexity),
                min_length: acc.min_length.max(policy.min_length),
                require_lower: acc.require_lower || policy.require_lower,
                require_upper: acc.require_upper || policy.require_upper,
                require_numbers: acc.require_numbers || policy.require_numbers,
                require_special: acc.require_special || policy.require_special,
                enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
            }
        }))
    } else {
        json!({})
    };

    // NOTE: Upstream still uses PascalCase here for `Object`!
    mpp_json["Object"] = json!("masterPasswordPolicy");
    mpp_json
}
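One subtlety in the merged struct: `min_complexity` and `min_length` are now `Option`s, and `Option<T: Ord>` orders `None` below every `Some`, so `acc.min_complexity.max(policy.min_complexity)` picks the strictest configured value and only stays `None` when no policy sets one. For example:

fn main() {
    // None < Some(_) for any value, so max() prefers a configured limit.
    assert_eq!(None::<u32>.max(Some(0)), Some(0));
    assert_eq!(Some(8).max(Some(12)), Some(12));
    assert_eq!(None::<u32>.max(None), None);
}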

@@ -10,7 +10,7 @@ use rocket_ws::{Message, WebSocket};
use crate::{
    auth::{ClientIp, WsAccessTokenHeader},
    db::{
        models::{AuthRequestId, Cipher, CollectionId, DeviceId, Folder, Send as DbSend, User, UserId},
        models::{AuthRequestId, Cipher, CollectionId, Device, DeviceId, Folder, PushId, Send as DbSend, User, UserId},
        DbConn,
    },
    Error, CONFIG,
@@ -339,7 +339,7 @@ impl WebSocketUsers {
    }

    // NOTE: The last modified date needs to be updated before calling these methods
    pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
    pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &mut DbConn) {
        // Skip any processing if both WebSockets and Push are not active
        if *NOTIFICATIONS_DISABLED {
            return;
@@ -355,11 +355,11 @@ impl WebSocketUsers {
        }

        if CONFIG.push_enabled() {
            push_user_update(ut, user);
            push_user_update(ut, user, push_uuid, conn).await;
        }
    }

    pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>) {
    pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>, conn: &mut DbConn) {
        // Skip any processing if both WebSockets and Push are not active
        if *NOTIFICATIONS_DISABLED {
            return;
@@ -375,17 +375,11 @@ impl WebSocketUsers {
        }

        if CONFIG.push_enabled() {
            push_logout(user, acting_device_id.clone());
            push_logout(user, acting_device_id.clone(), conn).await;
        }
    }

    pub async fn send_folder_update(
        &self,
        ut: UpdateType,
        folder: &Folder,
        acting_device_id: &DeviceId,
        conn: &mut DbConn,
    ) {
    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder, device: &Device, conn: &mut DbConn) {
        // Skip any processing if both WebSockets and Push are not active
        if *NOTIFICATIONS_DISABLED {
            return;
@@ -397,7 +391,7 @@ impl WebSocketUsers {
                ("RevisionDate".into(), serialize_date(folder.updated_at)),
            ],
            ut,
            Some(acting_device_id.clone()),
            Some(device.uuid.clone()),
        );

        if CONFIG.enable_websocket() {
@@ -405,7 +399,7 @@ impl WebSocketUsers {
        }

        if CONFIG.push_enabled() {
            push_folder_update(ut, folder, acting_device_id, conn).await;
            push_folder_update(ut, folder, device, conn).await;
        }
    }

@@ -414,7 +408,7 @@ impl WebSocketUsers {
        ut: UpdateType,
        cipher: &Cipher,
        user_ids: &[UserId],
        acting_device_id: &DeviceId,
        device: &Device,
        collection_uuids: Option<Vec<CollectionId>>,
        conn: &mut DbConn,
    ) {
@@ -444,7 +438,7 @@ impl WebSocketUsers {
                ("RevisionDate".into(), revision_date),
            ],
            ut,
            Some(acting_device_id.clone()),
            Some(device.uuid.clone()), // Acting device id (unique device/app uuid)
        );

        if CONFIG.enable_websocket() {
@@ -454,7 +448,7 @@ impl WebSocketUsers {
        }

        if CONFIG.push_enabled() && user_ids.len() == 1 {
            push_cipher_update(ut, cipher, acting_device_id, conn).await;
            push_cipher_update(ut, cipher, device, conn).await;
        }
    }

@@ -463,7 +457,7 @@ impl WebSocketUsers {
        ut: UpdateType,
        send: &DbSend,
        user_ids: &[UserId],
        acting_device_id: &DeviceId,
        device: &Device,
        conn: &mut DbConn,
    ) {
        // Skip any processing if both WebSockets and Push are not active
@@ -488,15 +482,15 @@ impl WebSocketUsers {
            }
        }
        if CONFIG.push_enabled() && user_ids.len() == 1 {
            push_send_update(ut, send, acting_device_id, conn).await;
            push_send_update(ut, send, device, conn).await;
        }
    }

    pub async fn send_auth_request(
        &self,
        user_id: &UserId,
        auth_request_uuid: &String,
        acting_device_id: &DeviceId,
        auth_request_uuid: &str,
        device: &Device,
        conn: &mut DbConn,
    ) {
        // Skip any processing if both WebSockets and Push are not active
@@ -504,16 +498,16 @@ impl WebSocketUsers {
            return;
        }
        let data = create_update(
            vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_id.to_string().into())],
            vec![("Id".into(), auth_request_uuid.to_owned().into()), ("UserId".into(), user_id.to_string().into())],
            UpdateType::AuthRequest,
            Some(acting_device_id.clone()),
            Some(device.uuid.clone()),
        );
        if CONFIG.enable_websocket() {
            self.send_update(user_id, &data).await;
        }

        if CONFIG.push_enabled() {
            push_auth_request(user_id.clone(), auth_request_uuid.to_string(), conn).await;
            push_auth_request(user_id, auth_request_uuid, device, conn).await;
        }
    }

@@ -521,7 +515,7 @@ impl WebSocketUsers {
        &self,
        user_id: &UserId,
        auth_request_id: &AuthRequestId,
        approving_device_id: &DeviceId,
        device: &Device,
        conn: &mut DbConn,
    ) {
        // Skip any processing if both WebSockets and Push are not active
@@ -531,14 +525,14 @@ impl WebSocketUsers {
        let data = create_update(
            vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
            UpdateType::AuthRequestResponse,
            Some(approving_device_id.clone()),
            Some(device.uuid.clone()),
        );
        if CONFIG.enable_websocket() {
            self.send_update(user_id, &data).await;
        }

        if CONFIG.push_enabled() {
            push_auth_response(user_id, auth_request_id, approving_device_id, conn).await;
            push_auth_response(user_id, auth_request_id, device, conn).await;
        }
    }
}
244 src/api/push.rs
@@ -7,9 +7,9 @@ use tokio::sync::RwLock;

use crate::{
api::{ApiResult, EmptyResult, UpdateType},
db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, Send, User, UserId},
db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, PushId, Send, User, UserId},
http_client::make_http_request,
util::format_date,
util::{format_date, get_uuid},
CONFIG,
};

@@ -28,20 +28,20 @@ struct LocalAuthPushToken {
valid_until: Instant,
}

async fn get_auth_push_token() -> ApiResult<String> {
static PUSH_TOKEN: Lazy<RwLock<LocalAuthPushToken>> = Lazy::new(|| {
async fn get_auth_api_token() -> ApiResult<String> {
static API_TOKEN: Lazy<RwLock<LocalAuthPushToken>> = Lazy::new(|| {
RwLock::new(LocalAuthPushToken {
access_token: String::new(),
valid_until: Instant::now(),
})
});
let push_token = PUSH_TOKEN.read().await;
let api_token = API_TOKEN.read().await;

if push_token.valid_until.saturating_duration_since(Instant::now()).as_secs() > 0 {
if api_token.valid_until.saturating_duration_since(Instant::now()).as_secs() > 0 {
debug!("Auth Push token still valid, no need for a new one");
return Ok(push_token.access_token.clone());
return Ok(api_token.access_token.clone());
}
drop(push_token); // Drop the read lock now
drop(api_token); // Drop the read lock now

let installation_id = CONFIG.push_installation_id();
let client_id = format!("installation.{installation_id}");
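The renamed `get_auth_api_token` keeps the same caching strategy: hold the token behind an `RwLock`, treat it as valid for only half of its advertised lifetime, and refresh on demand. A self-contained sketch of that pattern using std locks (the real code uses tokio's async `RwLock` and an HTTP call to Bitwarden's identity endpoint; `fetch_token` is a stand-in):

    use std::sync::RwLock;
    use std::time::{Duration, Instant};

    struct CachedToken {
        access_token: String,
        valid_until: Instant,
    }

    static TOKEN: RwLock<Option<CachedToken>> = RwLock::new(None);

    // Stand-in for the real HTTP call that returns (token, expires_in seconds).
    fn fetch_token() -> (String, u64) {
        ("fresh-token".to_string(), 3600)
    }

    fn get_token() -> String {
        // Fast path: reuse the cached token while it is still considered valid.
        if let Some(t) = TOKEN.read().unwrap().as_ref() {
            if t.valid_until > Instant::now() {
                return t.access_token.clone();
            }
        }
        // Slow path: fetch a new token and cache it for half its lifetime,
        // so it is refreshed well before the server-side expiry.
        let (access_token, expires_in) = fetch_token();
        let mut guard = TOKEN.write().unwrap();
        *guard = Some(CachedToken {
            access_token: access_token.clone(),
            valid_until: Instant::now() + Duration::from_secs(expires_in / 2),
        });
        access_token
    }

    fn main() {
        println!("{}", get_token());
    }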
@@ -68,44 +68,48 @@ async fn get_auth_push_token() -> ApiResult<String> {
Err(e) => err!(format!("Unexpected push token received from bitwarden server: {e}")),
};

let mut push_token = PUSH_TOKEN.write().await;
push_token.valid_until = Instant::now()
let mut api_token = API_TOKEN.write().await;
api_token.valid_until = Instant::now()
.checked_add(Duration::new((json_pushtoken.expires_in / 2) as u64, 0)) // Token valid for half the specified time
.unwrap();

push_token.access_token = json_pushtoken.access_token;
api_token.access_token = json_pushtoken.access_token;

debug!("Token still valid for {}", push_token.valid_until.saturating_duration_since(Instant::now()).as_secs());
Ok(push_token.access_token.clone())
debug!("Token still valid for {}", api_token.valid_until.saturating_duration_since(Instant::now()).as_secs());
Ok(api_token.access_token.clone())
}

pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbConn) -> EmptyResult {
if !CONFIG.push_enabled() || !device.is_push_device() || device.is_registered() {
if !CONFIG.push_enabled() || !device.is_push_device() {
return Ok(());
}

if device.push_token.is_none() {
warn!("Skipping the registration of the device {} because the push_token field is empty.", device.uuid);
warn!("To get rid of this message you need to clear the app data and reconnect the device.");
warn!("Skipping the registration of the device {:?} because the push_token field is empty.", device.uuid);
warn!("To get rid of this message you need to logout, clear the app data and login again on the device.");
return Ok(());
}

debug!("Registering Device {}", device.uuid);
debug!("Registering Device {:?}", device.push_uuid);

// generate a random push_uuid so we know the device is registered
device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
// Generate a random push_uuid if it doesn't already have one
if device.push_uuid.is_none() {
device.push_uuid = Some(PushId(get_uuid()));
}

// Needed to register a device for push to bitwarden:
let data = json!({
"deviceId": device.push_uuid, // Unique UUID per user/device
"pushToken": device.push_token,
"userId": device.user_uuid,
"deviceId": device.push_uuid,
"identifier": device.uuid,
"type": device.atype,
"pushToken": device.push_token
"identifier": device.uuid, // Unique UUID of the device/app, determined by the device/app itself currently registering
// "organizationIds:" [] // TODO: This is not yet implemented by Vaultwarden!
"installationId": CONFIG.push_installation_id(),
});

let auth_push_token = get_auth_push_token().await?;
let auth_header = format!("Bearer {}", &auth_push_token);
let auth_api_token = get_auth_api_token().await?;
let auth_header = format!("Bearer {auth_api_token}");

if let Err(e) = make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/register"))?
.header(CONTENT_TYPE, "application/json")
@@ -126,18 +130,21 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
Ok(())
}

pub async fn unregister_push_device(push_id: Option<String>) -> EmptyResult {
pub async fn unregister_push_device(push_id: &Option<PushId>) -> EmptyResult {
if !CONFIG.push_enabled() || push_id.is_none() {
return Ok(());
}
let auth_push_token = get_auth_push_token().await?;
let auth_api_token = get_auth_api_token().await?;

let auth_header = format!("Bearer {}", &auth_push_token);
let auth_header = format!("Bearer {auth_api_token}");

match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_id.unwrap()))?
.header(AUTHORIZATION, auth_header)
.send()
.await
match make_http_request(
Method::POST,
&format!("{}/push/delete/{}", CONFIG.push_relay_uri(), push_id.as_ref().unwrap()),
)?
.header(AUTHORIZATION, auth_header)
.send()
.await
{
Ok(r) => r,
Err(e) => err!(format!("An error occurred during device unregistration: {e}")),
@@ -145,12 +152,7 @@ pub async fn unregister_push_device(push_id: Option<String>) -> EmptyResult {
Ok(())
}

pub async fn push_cipher_update(
ut: UpdateType,
cipher: &Cipher,
acting_device_id: &DeviceId,
conn: &mut crate::db::DbConn,
) {
pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device, conn: &mut crate::db::DbConn) {
// We shouldn't send a push notification on cipher update if the cipher belongs to an organization; this isn't implemented in the upstream server either.
if cipher.organization_uuid.is_some() {
return;

@@ -163,87 +165,97 @@ pub async fn push_cipher_update(
if Device::check_user_has_push_device(user_id, conn).await {
send_to_push_relay(json!({
"userId": user_id,
"organizationId": (),
"deviceId": acting_device_id,
"identifier": acting_device_id,
"organizationId": null,
"deviceId": device.push_uuid, // Should be the record's unique uuid of the acting device (unique uuid per user/device)
"identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
"type": ut as i32,
"payload": {
"Id": cipher.uuid,
"UserId": cipher.user_uuid,
"OrganizationId": (),
"RevisionDate": format_date(&cipher.updated_at)
}
"id": cipher.uuid,
"userId": cipher.user_uuid,
"organizationId": null,
"collectionIds": null,
"revisionDate": format_date(&cipher.updated_at)
},
"clientType": null,
"installationId": null
}))
.await;
}
}

pub fn push_logout(user: &User, acting_device_id: Option<DeviceId>) {
pub async fn push_logout(user: &User, acting_device_id: Option<DeviceId>, conn: &mut crate::db::DbConn) {
let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null);

tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,
"organizationId": (),
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": UpdateType::LogOut as i32,
"payload": {
"UserId": user.uuid,
"Date": format_date(&user.updated_at)
}
})));
}

pub fn push_user_update(ut: UpdateType, user: &User) {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,
"organizationId": (),
"deviceId": (),
"identifier": (),
"type": ut as i32,
"payload": {
"UserId": user.uuid,
"Date": format_date(&user.updated_at)
}
})));
}

pub async fn push_folder_update(
ut: UpdateType,
folder: &Folder,
acting_device_id: &DeviceId,
conn: &mut crate::db::DbConn,
) {
if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
if Device::check_user_has_push_device(&user.uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": folder.user_uuid,
"userId": user.uuid,
"organizationId": (),
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": ut as i32,
"type": UpdateType::LogOut as i32,
"payload": {
"Id": folder.uuid,
"UserId": folder.user_uuid,
"RevisionDate": format_date(&folder.updated_at)
}
"userId": user.uuid,
"date": format_date(&user.updated_at)
},
"clientType": null,
"installationId": null
})));
}
}

pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_id: &DeviceId, conn: &mut crate::db::DbConn) {
pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(&user.uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,
"organizationId": null,
"deviceId": push_uuid,
"identifier": null,
"type": ut as i32,
"payload": {
"userId": user.uuid,
"date": format_date(&user.updated_at)
},
"clientType": null,
"installationId": null
})));
}
}

pub async fn push_folder_update(ut: UpdateType, folder: &Folder, device: &Device, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": folder.user_uuid,
"organizationId": null,
"deviceId": device.push_uuid, // Should be the record's unique uuid of the acting device (unique uuid per user/device)
"identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
"type": ut as i32,
"payload": {
"id": folder.uuid,
"userId": folder.user_uuid,
"revisionDate": format_date(&folder.updated_at)
},
"clientType": null,
"installationId": null
})));
}
}

pub async fn push_send_update(ut: UpdateType, send: &Send, device: &Device, conn: &mut crate::db::DbConn) {
if let Some(s) = &send.user_uuid {
if Device::check_user_has_push_device(s, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": send.user_uuid,
"organizationId": (),
"deviceId": acting_device_id,
"identifier": acting_device_id,
"organizationId": null,
"deviceId": device.push_uuid, // Should be the record's unique uuid of the acting device (unique uuid per user/device)
"identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
"type": ut as i32,
"payload": {
"Id": send.uuid,
"UserId": send.user_uuid,
"RevisionDate": format_date(&send.revision_date)
}
"id": send.uuid,
"userId": send.user_uuid,
"revisionDate": format_date(&send.revision_date)
},
"clientType": null,
"installationId": null
})));
}
}
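Across these hunks the relay payload moves from PascalCase keys (`Id`, `UserId`, `RevisionDate`) to camelCase, the `()` placeholders become explicit `null`s, and every notification gains `clientType` and `installationId` fields. A sketch of the new shape with `serde_json` (all field values are placeholders, not real ids):

    use serde_json::json;

    fn main() {
        let push_uuid = Some("11111111-2222-3333-4444-555555555555");
        let device_uuid = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee";

        let notification = json!({
            "userId": "user-uuid",
            "organizationId": null,    // explicit null instead of `()`
            "deviceId": push_uuid,     // push registration id of the acting device
            "identifier": device_uuid, // the device/app's own uuid
            "type": 9,                 // UpdateType discriminant as i32
            "payload": {
                "id": "cipher-uuid",   // camelCase keys, matching newer clients
                "userId": "user-uuid",
                "revisionDate": "2025-01-01T00:00:00.000000Z"
            },
            "clientType": null,
            "installationId": null
        });

        println!("{notification:#}"); // pretty-print the JSON value
    }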
@@ -254,20 +266,20 @@ async fn send_to_push_relay(notification_data: Value) {
return;
}

let auth_push_token = match get_auth_push_token().await {
let auth_api_token = match get_auth_api_token().await {
Ok(s) => s,
Err(e) => {
debug!("Could not get the auth push token: {}", e);
debug!("Could not get the auth push token: {e}");
return;
}
};

let auth_header = format!("Bearer {}", &auth_push_token);
let auth_header = format!("Bearer {auth_api_token}");

let req = match make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/send")) {
Ok(r) => r,
Err(e) => {
error!("An error occurred while sending a send update to the push relay: {}", e);
error!("An error occurred while sending a send update to the push relay: {e}");
return;
}
};

@@ -280,22 +292,24 @@ async fn send_to_push_relay(notification_data: Value) {
.send()
.await
{
error!("An error occurred while sending a send update to the push relay: {}", e);
error!("An error occurred while sending a send update to the push relay: {e}");
};
}

pub async fn push_auth_request(user_id: UserId, auth_request_id: String, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(&user_id, conn).await {
pub async fn push_auth_request(user_id: &UserId, auth_request_id: &str, device: &Device, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(user_id, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user_id,
"organizationId": (),
"deviceId": null,
"identifier": null,
"organizationId": null,
"deviceId": device.push_uuid, // Should be the record's unique uuid of the acting device (unique uuid per user/device)
"identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
"type": UpdateType::AuthRequest as i32,
"payload": {
"Id": auth_request_id,
"UserId": user_id,
}
"userId": user_id,
"id": auth_request_id,
},
"clientType": null,
"installationId": null
})));
}
}

@@ -303,20 +317,22 @@ pub async fn push_auth_request(user_id: UserId, auth_request_id: String, conn: &
pub async fn push_auth_response(
user_id: &UserId,
auth_request_id: &AuthRequestId,
approving_device_id: &DeviceId,
device: &Device,
conn: &mut crate::db::DbConn,
) {
if Device::check_user_has_push_device(user_id, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user_id,
"organizationId": (),
"deviceId": approving_device_id,
"identifier": approving_device_id,
"organizationId": null,
"deviceId": device.push_uuid, // Should be the record's unique uuid of the acting device (unique uuid per user/device)
"identifier": device.uuid, // Should be the acting device id (aka uuid per device/app)
"type": UpdateType::AuthRequestResponse as i32,
"payload": {
"Id": auth_request_id,
"UserId": user_id,
}
"userId": user_id,
"id": auth_request_id,
},
"clientType": null,
"installationId": null
})));
}
}
@@ -55,9 +55,10 @@ fn not_found() -> ApiResult<Html<String>> {
#[get("/css/vaultwarden.css")]
fn vaultwarden_css() -> Cached<Css<String>> {
let css_options = json!({
"signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(),
"signup_disabled": CONFIG.is_signup_disabled(),
"mail_enabled": CONFIG.mail_enabled(),
"yubico_enabled": CONFIG._enable_yubico() && (CONFIG.yubico_client_id().is_some() == CONFIG.yubico_secret_key().is_some()),
"mail_2fa_enabled": CONFIG._enable_email_2fa(),
"yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
"emergency_access_allowed": CONFIG.emergency_access_allowed(),
"sends_allowed": CONFIG.sends_allowed(),
"load_user_scss": true,
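The `yubico_enabled` change fixes a real boolean bug: `is_some() == is_some()` is also true when *both* options are `None`, so Yubikey support could be reported as enabled with no credentials configured at all. A minimal demonstration:

    fn main() {
        let client_id: Option<String> = None;
        let secret_key: Option<String> = None;

        // Old check: equality of the two `is_some()` flags.
        // Both None => true == true => "enabled" despite missing credentials.
        let old = client_id.is_some() == secret_key.is_some();

        // New check: both values must actually be present.
        let new = client_id.is_some() && secret_key.is_some();

        assert!(old);  // the bug: reports enabled
        assert!(!new); // the fix: reports disabled
        println!("old = {old}, new = {new}");
    }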
src/auth.rs (139 lines changed)
@@ -7,16 +7,14 @@ use once_cell::sync::{Lazy, OnceCell};
use openssl::rsa::Rsa;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use std::{
env,
fs::File,
io::{Read, Write},
net::IpAddr,
};
use std::{env, net::IpAddr};

use crate::db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
use crate::{
config::PathType,
db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
},
};
use crate::{error::Error, CONFIG};
@@ -35,41 +33,38 @@ static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.
static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));
static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));
static JWT_REGISTER_VERIFY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|register_verify", CONFIG.domain_origin()));

static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();

pub fn initialize_keys() -> Result<(), Error> {
fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
let mut priv_key_buffer = Vec::with_capacity(2048);
pub async fn initialize_keys() -> Result<(), Error> {
use std::io::Error;

let mut priv_key_file = File::options()
.create(create_if_missing)
.truncate(false)
.read(true)
.write(create_if_missing)
.open(CONFIG.private_rsa_key())?;
let rsa_key_filename = std::path::PathBuf::from(CONFIG.private_rsa_key())
.file_name()
.ok_or_else(|| Error::other("Private RSA key path missing filename"))?
.to_str()
.ok_or_else(|| Error::other("Private RSA key path filename is not valid UTF-8"))?
.to_string();

#[allow(clippy::verbose_file_reads)]
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
let operator = CONFIG.opendal_operator_for_path_type(PathType::RsaKey).map_err(Error::other)?;

let rsa_key = if bytes_read > 0 {
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
} else if create_if_missing {
// Only create the key if the file doesn't exist or is empty
let rsa_key = Rsa::generate(2048)?;
priv_key_buffer = rsa_key.private_key_to_pem()?;
priv_key_file.write_all(&priv_key_buffer)?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
rsa_key
} else {
err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
};
let priv_key_buffer = match operator.read(&rsa_key_filename).await {
Ok(buffer) => Some(buffer),
Err(e) if e.kind() == opendal::ErrorKind::NotFound => None,
Err(e) => return Err(e.into()),
};

Ok((rsa_key, priv_key_buffer))
}

let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
let (priv_key, priv_key_buffer) = if let Some(priv_key_buffer) = priv_key_buffer {
(Rsa::private_key_from_pem(priv_key_buffer.to_vec().as_slice())?, priv_key_buffer.to_vec())
} else {
let rsa_key = Rsa::generate(2048)?;
let priv_key_buffer = rsa_key.private_key_to_pem()?;
operator.write(&rsa_key_filename, priv_key_buffer.clone()).await?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
(rsa_key, priv_key_buffer)
};
let pub_key_buffer = priv_key.public_key_to_pem()?;

let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
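The rewritten `initialize_keys` reads the RSA key through an OpenDAL operator instead of `std::fs`, treating `NotFound` as "generate and persist a new key". A stripped-down sketch of that read-or-create flow against the local filesystem backend (assumes a recent `opendal` and `tokio`; the key bytes are a placeholder rather than a real RSA PEM):

    use opendal::{services::Fs, ErrorKind, Operator};

    async fn load_or_create_key(op: &Operator, name: &str) -> opendal::Result<Vec<u8>> {
        match op.read(name).await {
            // Key already exists: use it as-is.
            Ok(buffer) => Ok(buffer.to_vec()),
            // Key missing: generate one and persist it through the same operator.
            Err(e) if e.kind() == ErrorKind::NotFound => {
                let new_key = b"-----BEGIN PRIVATE KEY----- (placeholder) -----END PRIVATE KEY-----".to_vec();
                op.write(name, new_key.clone()).await?;
                Ok(new_key)
            }
            Err(e) => Err(e),
        }
    }

    #[tokio::main]
    async fn main() -> opendal::Result<()> {
        let op = Operator::new(Fs::default().root("/tmp"))?.finish();
        let key = load_or_create_key(&op, "rsa_key.pem").await?;
        println!("loaded {} bytes", key.len());
        Ok(())
    }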
@@ -145,6 +140,10 @@ pub fn decode_file_download(token: &str) -> Result<FileDownloadClaims, Error> {
decode_jwt(token, JWT_FILE_DOWNLOAD_ISSUER.to_string())
}

pub fn decode_register_verify(token: &str) -> Result<RegisterVerifyClaims, Error> {
decode_jwt(token, JWT_REGISTER_VERIFY_ISSUER.to_string())
}

#[derive(Debug, Serialize, Deserialize)]
pub struct LoginJwtClaims {
// Not before

@@ -176,6 +175,11 @@ pub struct LoginJwtClaims {
pub sstamp: String,
// device uuid
pub device: DeviceId,
// what kind of device, like FirefoxBrowser or Android derived from DeviceType
pub devicetype: String,
// the type of client_id, like web, cli, desktop, browser or mobile
pub client_id: String,

// [ "api", "offline_access" ]
pub scope: Vec<String>,
// [ "Application" ]

@@ -284,7 +288,7 @@ pub fn generate_organization_api_key_login_claims(
exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
iss: JWT_ORG_API_KEY_ISSUER.to_string(),
sub: org_api_key_uuid,
client_id: format!("organization.{}", org_id),
client_id: format!("organization.{org_id}"),
client_sub: org_id,
scope: vec!["api.organization".into()],
}

@@ -315,6 +319,33 @@ pub fn generate_file_download_claims(cipher_id: CipherId, file_id: AttachmentId)
}
}

#[derive(Debug, Serialize, Deserialize)]
pub struct RegisterVerifyClaims {
// Not before
pub nbf: i64,
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
// Subject
pub sub: String,

pub name: Option<String>,
pub verified: bool,
}

pub fn generate_register_verify_claims(email: String, name: Option<String>, verified: bool) -> RegisterVerifyClaims {
let time_now = Utc::now();
RegisterVerifyClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_minutes(30).unwrap()).timestamp(),
iss: JWT_REGISTER_VERIFY_ISSUER.to_string(),
sub: email,
name,
verified,
}
}

#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
// Not before
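The new `RegisterVerifyClaims` follows the same shape as the other claim structs: standard `nbf`/`exp`/`iss`/`sub` fields plus payload-specific extras, serialized straight into a JWT. A toy round-trip with the `jsonwebtoken` crate (HS256 with a hard-coded secret for brevity; the real code signs with the instance's RSA key pair):

    use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    struct RegisterVerifyClaims {
        nbf: i64,
        exp: i64,
        iss: String,
        sub: String,
        name: Option<String>,
        verified: bool,
    }

    fn main() -> Result<(), jsonwebtoken::errors::Error> {
        let now = chrono::Utc::now().timestamp();
        let claims = RegisterVerifyClaims {
            nbf: now,
            exp: now + 30 * 60, // token is only honored for 30 minutes
            iss: "example.com|register_verify".into(),
            sub: "user@example.com".into(),
            name: Some("User".into()),
            verified: true,
        };

        let token = encode(&Header::new(Algorithm::HS256), &claims, &EncodingKey::from_secret(b"demo-secret"))?;

        // Decoding validates exp by default; pin the issuer as well.
        let mut validation = Validation::new(Algorithm::HS256);
        validation.set_issuer(&["example.com|register_verify"]);
        let decoded = decode::<RegisterVerifyClaims>(&token, &DecodingKey::from_secret(b"demo-secret"), &validation)?;
        println!("{:?}", decoded.claims);
        Ok(())
    }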
@@ -515,7 +546,7 @@ impl<'r> FromRequest<'r> for Headers {
let mut user = user;
user.reset_stamp_exception();
if let Err(e) = user.save(&mut conn).await {
error!("Error updating user: {:#?}", e);
error!("Error updating user: {e:#?}");
}
err_handler!("Stamp exception is expired")
} else if !stamp_exception.routes.contains(&current_route.to_string()) {

@@ -657,17 +688,6 @@ impl<'r> FromRequest<'r> for AdminHeaders {
}
}

impl From<AdminHeaders> for Headers {
fn from(h: AdminHeaders) -> Headers {
Headers {
host: h.host,
device: h.device,
user: h.user,
ip: h.ip,
}
}
}

// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
// but there could be cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.

@@ -837,8 +857,10 @@ impl<'r> FromRequest<'r> for OwnerHeaders {

pub struct OrgMemberHeaders {
pub host: String,
pub device: Device,
pub user: User,
pub org_id: OrganizationId,
pub membership: Membership,
pub ip: ClientIp,
}

#[rocket::async_trait]

@@ -850,8 +872,10 @@ impl<'r> FromRequest<'r> for OrgMemberHeaders {
if headers.is_member() {
Outcome::Success(Self {
host: headers.host,
device: headers.device,
user: headers.user,
org_id: headers.membership.org_uuid,
membership: headers.membership,
ip: headers.ip,
})
} else {
err_handler!("You need to be a Member of the Organization to call this endpoint")

@@ -859,6 +883,17 @@ impl<'r> FromRequest<'r> for OrgMemberHeaders {
}
}

impl From<OrgMemberHeaders> for Headers {
fn from(h: OrgMemberHeaders) -> Headers {
Headers {
host: h.host,
device: h.device,
user: h.user,
ip: h.ip,
}
}
}

//
// Client IP address detection
//

@@ -879,7 +914,7 @@ impl<'r> FromRequest<'r> for ClientIp {
None => ip,
}
.parse()
.map_err(|_| warn!("'{}' header is malformed: {}", CONFIG.ip_header(), ip))
.map_err(|_| warn!("'{}' header is malformed: {ip}", CONFIG.ip_header()))
.ok()
})
} else {
src/config.rs (230 lines changed)
@@ -3,7 +3,7 @@ use std::{
process::exit,
sync::{
atomic::{AtomicBool, Ordering},
RwLock,
LazyLock, RwLock,
},
};

@@ -22,10 +22,32 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
});

static CONFIG_FILE_PARENT_DIR: LazyLock<String> = LazyLock::new(|| {
let path = std::path::PathBuf::from(&*CONFIG_FILE);
path.parent().unwrap_or(std::path::Path::new("data")).to_str().unwrap_or("data").to_string()
});

static CONFIG_FILENAME: LazyLock<String> = LazyLock::new(|| {
let path = std::path::PathBuf::from(&*CONFIG_FILE);
path.file_name().unwrap_or(std::ffi::OsStr::new("config.json")).to_str().unwrap_or("config.json").to_string()
});

pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);

pub static CONFIG: Lazy<Config> = Lazy::new(|| {
Config::load().unwrap_or_else(|e| {
std::thread::spawn(|| {
let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
});

rt.block_on(Config::load()).unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
})
})
.join()
.unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
})
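`Config::load` is now async (it may read the config through OpenDAL), but `CONFIG` is a synchronous `Lazy` static, so the initializer spawns a throwaway thread with its own single-threaded tokio runtime and blocks on the future. A self-contained sketch of that bridge (assumes `once_cell` and `tokio`; `load` is a stand-in for the real loader):

    use once_cell::sync::Lazy;

    #[derive(Debug)]
    struct Config {
        domain: String,
    }

    // Stand-in for the real async loader that may hit remote storage.
    async fn load() -> Result<Config, String> {
        Ok(Config { domain: "https://vw.example.com".into() })
    }

    static CONFIG: Lazy<Config> = Lazy::new(|| {
        // A Lazy initializer is sync, and it may itself run inside a tokio
        // runtime, where calling block_on directly would panic. Spawning a
        // plain thread with its own current-thread runtime sidesteps both.
        std::thread::spawn(|| {
            let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().expect("runtime");
            rt.block_on(load()).expect("config")
        })
        .join()
        .expect("config thread panicked")
    });

    fn main() {
        println!("{:?}", *CONFIG);
    }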
@@ -104,16 +126,17 @@ macro_rules! make_config {

let mut builder = ConfigBuilder::default();
$($(
builder.$name = make_config! { @getenv paste::paste!(stringify!([<$name:upper>])), $ty };
builder.$name = make_config! { @getenv pastey::paste!(stringify!([<$name:upper>])), $ty };
)+)+

builder
}

fn from_file(path: &str) -> Result<Self, Error> {
let config_str = std::fs::read_to_string(path)?;
println!("[INFO] Using saved config from `{path}` for configuration.\n");
serde_json::from_str(&config_str).map_err(Into::into)
async fn from_file() -> Result<Self, Error> {
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
let config_bytes = operator.read(&CONFIG_FILENAME).await?;
println!("[INFO] Using saved config from `{}` for configuration.\n", *CONFIG_FILE);
serde_json::from_slice(&config_bytes.to_vec()).map_err(Into::into)
}

fn clear_non_editable(&mut self) {

@@ -133,7 +156,7 @@ macro_rules! make_config {
builder.$name = v.clone();

if self.$name.is_some() {
overrides.push(paste::paste!(stringify!([<$name:upper>])).into());
overrides.push(pastey::paste!(stringify!([<$name:upper>])).into());
}
}
)+)+

@@ -231,7 +254,7 @@ macro_rules! make_config {
element.insert("default".into(), serde_json::to_value(def.$name).unwrap());
element.insert("type".into(), (_get_form_type(stringify!($ty))).into());
element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into());
element.insert("overridden".into(), (overridden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
element.insert("overridden".into(), (overridden.contains(&pastey::paste!(stringify!([<$name:upper>])).into())).into());
element
}),
)+

@@ -375,19 +398,19 @@ make_config! {
/// Data folder |> Main data folder
data_folder: String, false, def, "data".to_string();
/// Database URL
database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
database_url: String, false, auto, |c| format!("{}/db.sqlite3", c.data_folder);
/// Icon cache folder
icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
icon_cache_folder: String, false, auto, |c| format!("{}/icon_cache", c.data_folder);
/// Attachments folder
attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
attachments_folder: String, false, auto, |c| format!("{}/attachments", c.data_folder);
/// Sends folder
sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
sends_folder: String, false, auto, |c| format!("{}/sends", c.data_folder);
/// Temp folder |> Used for storing temporary file uploads
tmp_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp");
tmp_folder: String, false, auto, |c| format!("{}/tmp", c.data_folder);
/// Templates folder
templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
templates_folder: String, false, auto, |c| format!("{}/templates", c.data_folder);
/// Session JWT key
rsa_key_filename: String, false, auto, |c| format!("{}/{}", c.data_folder, "rsa_key");
rsa_key_filename: String, false, auto, |c| format!("{}/rsa_key", c.data_folder);
/// Web vault folder
web_vault_folder: String, false, def, "web-vault/".to_string();
},

@@ -484,7 +507,8 @@ make_config! {
disable_icon_download: bool, true, def, false;
/// Allow new signups |> Controls whether new users can register. Users can be invited by the vaultwarden admin even if this is disabled
signups_allowed: bool, true, def, true;
/// Require email verification on signups. This will prevent logins from succeeding until the address has been verified
/// Require email verification on signups. On new client versions, this will require verification at signup time. On older clients,
/// this will prevent logins from succeeding until the address has been verified
signups_verify: bool, true, def, false;
/// If signups require email verification, automatically re-send verification email if it hasn't been sent for a while (in seconds)
signups_verify_resend_time: u64, true, def, 3_600;

@@ -578,7 +602,7 @@ make_config! {
authenticator_disable_time_drift: bool, true, def, false;

/// Customize the enabled feature flags on the clients |> This is a comma separated list of feature flags to enable.
experimental_client_feature_flags: String, false, def, "fido2-vault-credentials".to_string();
experimental_client_feature_flags: String, false, def, String::new();

/// Require new device emails |> When a user logs in an email is required to be sent.
/// If sending the email fails the login attempt will fail.

@@ -734,7 +758,7 @@ make_config! {
email_expiration_time: u64, true, def, 600;
/// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
email_attempts_limit: u64, true, def, 3;
/// Automatically enforce at login |> Setup email 2FA provider regardless of any organization policy
/// Setup email 2FA at signup |> Setup email 2FA provider on registration regardless of any organization policy
email_2fa_enforce_on_verified_invite: bool, true, def, false;
/// Auto-enable 2FA (Know the risks!) |> Automatically setup email 2FA as fallback provider when needed
email_2fa_auto_fallback: bool, true, def, false;

@@ -832,16 +856,25 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
}
}

// TODO: deal with deprecated flags so they can be removed from this list, cf. #4263
// Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
// Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
// Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
// iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
//
// NOTE: Move deprecated flags to the utils::parse_experimental_client_feature_flags() DEPRECATED_FLAGS const!
const KNOWN_FLAGS: &[&str] = &[
"autofill-overlay",
"autofill-v2",
"browser-fileless-import",
"extension-refresh",
"fido2-vault-credentials",
// Autofill Team
"inline-menu-positioning-improvements",
"ssh-key-vault-item",
"inline-menu-totp",
"ssh-agent",
// Key Management Team
"ssh-key-vault-item",
// Tools
"export-attachments",
// Mobile Team
"anon-addy-self-host-alias",
"simple-login-self-host-alias",
"mutual-tls",
];
let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
@@ -1128,11 +1161,103 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls
"starttls".to_string()
}

fn opendal_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
// Cache of previously built operators by path
static OPERATORS_BY_PATH: LazyLock<dashmap::DashMap<String, opendal::Operator>> =
LazyLock::new(dashmap::DashMap::new);

if let Some(operator) = OPERATORS_BY_PATH.get(path) {
return Ok(operator.clone());
}

let operator = if path.starts_with("s3://") {
#[cfg(not(s3))]
return Err(opendal::Error::new(opendal::ErrorKind::ConfigInvalid, "S3 support is not enabled").into());

#[cfg(s3)]
opendal_s3_operator_for_path(path)?
} else {
let builder = opendal::services::Fs::default().root(path);
opendal::Operator::new(builder)?.finish()
};

OPERATORS_BY_PATH.insert(path.to_string(), operator.clone());

Ok(operator)
}

#[cfg(s3)]
fn opendal_s3_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
use crate::http_client::aws::AwsReqwestConnector;
use aws_config::{default_provider::credentials::DefaultCredentialsChain, provider_config::ProviderConfig};

// This is a custom AWS credential loader that uses the official AWS Rust
// SDK config crate to load credentials. This ensures maximum compatibility
// with AWS credential configurations. For example, OpenDAL doesn't support
// AWS SSO temporary credentials yet.
struct OpenDALS3CredentialLoader {}

#[async_trait]
impl reqsign::AwsCredentialLoad for OpenDALS3CredentialLoader {
async fn load_credential(&self, _client: reqwest::Client) -> anyhow::Result<Option<reqsign::AwsCredential>> {
use aws_credential_types::provider::ProvideCredentials as _;
use tokio::sync::OnceCell;

static DEFAULT_CREDENTIAL_CHAIN: OnceCell<DefaultCredentialsChain> = OnceCell::const_new();

let chain = DEFAULT_CREDENTIAL_CHAIN
.get_or_init(|| {
let reqwest_client = reqwest::Client::builder().build().unwrap();
let connector = AwsReqwestConnector {
client: reqwest_client,
};

let conf = ProviderConfig::default().with_http_client(connector);

DefaultCredentialsChain::builder().configure(conf).build()
})
.await;

let creds = chain.provide_credentials().await?;

Ok(Some(reqsign::AwsCredential {
access_key_id: creds.access_key_id().to_string(),
secret_access_key: creds.secret_access_key().to_string(),
session_token: creds.session_token().map(|s| s.to_string()),
expires_in: creds.expiry().map(|expiration| expiration.into()),
}))
}
}

const OPEN_DAL_S3_CREDENTIAL_LOADER: OpenDALS3CredentialLoader = OpenDALS3CredentialLoader {};

let url = Url::parse(path).map_err(|e| format!("Invalid S3 URL path {path:?}: {e}"))?;

let bucket = url.host_str().ok_or_else(|| format!("Missing Bucket name in data folder S3 URL {path:?}"))?;

let builder = opendal::services::S3::default()
.customized_credential_load(Box::new(OPEN_DAL_S3_CREDENTIAL_LOADER))
.enable_virtual_host_style()
.bucket(bucket)
.root(url.path())
.default_storage_class("INTELLIGENT_TIERING");

Ok(opendal::Operator::new(builder)?.finish())
}

pub enum PathType {
Data,
IconCache,
Attachments,
Sends,
RsaKey,
}

impl Config {
pub fn load() -> Result<Self, Error> {
pub async fn load() -> Result<Self, Error> {
// Loading from env and file
let _env = ConfigBuilder::from_env();
let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
let _usr = ConfigBuilder::from_file().await.unwrap_or_default();

// Create merged config, config file overwrites env
let mut _overrides = Vec::new();
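The S3 builder derives everything from the `s3://bucket/prefix` data-folder URL: the URL host becomes the bucket and the URL path becomes the operator root. A small sketch of just that parsing step with the `url` crate (bucket and prefix values are examples):

    use url::Url;

    fn parse_s3_path(path: &str) -> Result<(String, String), String> {
        let url = Url::parse(path).map_err(|e| format!("Invalid S3 URL {path:?}: {e}"))?;
        // The host component is the bucket name...
        let bucket = url.host_str().ok_or_else(|| format!("Missing bucket name in {path:?}"))?.to_string();
        // ...and the path component becomes the root prefix inside the bucket.
        Ok((bucket, url.path().to_string()))
    }

    fn main() {
        let (bucket, root) = parse_s3_path("s3://my-vaultwarden-bucket/vw/data").unwrap();
        assert_eq!(bucket, "my-vaultwarden-bucket");
        assert_eq!(root, "/vw/data");
        println!("bucket={bucket}, root={root}");
    }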
@@ -1156,7 +1281,7 @@ impl Config {
})
}

pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
pub async fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
// Remove default values
//let builder = other.remove(&self.inner.read().unwrap()._env);

@@ -1188,20 +1313,19 @@ impl Config {
}

// Save to file
use std::{fs::File, io::Write};
let mut file = File::create(&*CONFIG_FILE)?;
file.write_all(config_str.as_bytes())?;
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
operator.write(&CONFIG_FILENAME, config_str).await?;

Ok(())
}

fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
async fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
let builder = {
let usr = &self.inner.read().unwrap()._usr;
let mut _overrides = Vec::new();
usr.merge(&other, false, &mut _overrides)
};
self.update_config(builder, false)
self.update_config(builder, false).await
}

/// Tests whether an email's domain is allowed. A domain is allowed if it

@@ -1210,7 +1334,7 @@ impl Config {
pub fn is_email_domain_allowed(&self, email: &str) -> bool {
let e: Vec<&str> = email.rsplitn(2, '@').collect();
if e.len() != 2 || e[0].is_empty() || e[1].is_empty() {
warn!("Failed to parse email address '{}'", email);
warn!("Failed to parse email address '{email}'");
return false;
}
let email_domain = e[0].to_lowercase();

@@ -1230,6 +1354,14 @@ impl Config {
}
}

// The registration link should be hidden if signup is not allowed and the whitelist is empty,
// unless mail is disabled and invitations are allowed
pub fn is_signup_disabled(&self) -> bool {
!self.signups_allowed()
&& self.signups_domains_whitelist().is_empty()
&& (self.mail_enabled() || !self.invitations_allowed())
}

/// Tests whether the specified user is allowed to create an organization.
pub fn is_org_creation_allowed(&self, email: &str) -> bool {
let users = self.org_creation_users();

@@ -1243,8 +1375,9 @@ impl Config {
}
}

pub fn delete_user_config(&self) -> Result<(), Error> {
std::fs::remove_file(&*CONFIG_FILE)?;
pub async fn delete_user_config(&self) -> Result<(), Error> {
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
operator.delete(&CONFIG_FILENAME).await?;

// Empty user config
let usr = ConfigBuilder::default();

@@ -1274,7 +1407,7 @@ impl Config {
inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
}

pub fn get_duo_akey(&self) -> String {
pub async fn get_duo_akey(&self) -> String {
if let Some(akey) = self._duo_akey() {
akey
} else {

@@ -1285,7 +1418,7 @@ impl Config {
_duo_akey: Some(akey_s.clone()),
..Default::default()
};
self.update_config_partial(builder).ok();
self.update_config_partial(builder).await.ok();

akey_s
}

@@ -1298,6 +1431,23 @@ impl Config {
token.is_some() && !token.unwrap().trim().is_empty()
}

pub fn opendal_operator_for_path_type(&self, path_type: PathType) -> Result<opendal::Operator, Error> {
let path = match path_type {
PathType::Data => self.data_folder(),
PathType::IconCache => self.icon_cache_folder(),
PathType::Attachments => self.attachments_folder(),
PathType::Sends => self.sends_folder(),
PathType::RsaKey => std::path::Path::new(&self.rsa_key_filename())
.parent()
.ok_or_else(|| std::io::Error::other("Failed to get directory of RSA key file"))?
.to_str()
.ok_or_else(|| std::io::Error::other("Failed to convert RSA key file directory to UTF-8 string"))?
.to_string(),
};

opendal_operator_for_path(&path)
}

pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
if self.reload_templates() {
warn!("RELOADING TEMPLATES");

@@ -1367,6 +1517,7 @@ where
reg!("email/email_footer_text");

reg!("email/admin_reset_password", ".html");
reg!("email/change_email_existing", ".html");
reg!("email/change_email", ".html");
reg!("email/delete_account", ".html");
reg!("email/emergency_access_invite_accepted", ".html");

@@ -1383,6 +1534,7 @@ where
reg!("email/protected_action", ".html");
reg!("email/pw_hint_none", ".html");
reg!("email/pw_hint_some", ".html");
reg!("email/register_verify_email", ".html");
reg!("email/send_2fa_removed_from_org", ".html");
reg!("email/send_emergency_access_invite", ".html");
reg!("email/send_org_invite", ".html");
@@ -110,7 +110,6 @@ pub fn generate_api_key() -> String {
// Constant time compare
//
pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
use ring::constant_time::verify_slices_are_equal;

verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
use subtle::ConstantTimeEq;
a.as_ref().ct_eq(b.as_ref()).into()
}
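The swap from `ring::constant_time::verify_slices_are_equal` to the `subtle` crate keeps the same guarantee: the comparison takes time independent of where the inputs first differ, which prevents timing attacks on token checks. A standalone sketch (assumes the `subtle` crate):

    use subtle::ConstantTimeEq;

    /// Compare two byte strings without an early exit on the first mismatch.
    fn ct_eq(a: impl AsRef<[u8]>, b: impl AsRef<[u8]>) -> bool {
        // ct_eq returns a `Choice`; converting to bool happens once, at the end,
        // so the comparison itself never branches on secret data.
        a.as_ref().ct_eq(b.as_ref()).into()
    }

    fn main() {
        assert!(ct_eq("secret-token", "secret-token"));
        assert!(!ct_eq("secret-token", "secret-tokeX"));
        // Different lengths compare unequal (the length itself is not secret).
        assert!(!ct_eq("short", "longer-value"));
    }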
@@ -130,7 +130,7 @@ macro_rules! generate_connections {
DbConnType::$name => {
#[cfg($name)]
{
paste::paste!{ [< $name _migrations >]::run_migrations()?; }
pastey::paste!{ [< $name _migrations >]::run_migrations()?; }
let manager = ConnectionManager::new(&url);
let pool = Pool::builder()
.max_size(CONFIG.database_max_conns())

@@ -259,7 +259,7 @@ macro_rules! db_run {
$($(
#[cfg($db)]
$crate::db::DbConnInner::$db($conn) => {
paste::paste! {
pastey::paste! {
#[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
#[allow(unused)] use [<__ $db _model>]::*;
}

@@ -280,7 +280,7 @@ macro_rules! db_run {
$($(
#[cfg($db)]
$crate::db::DbConnInner::$db($conn) => {
paste::paste! {
pastey::paste! {
#[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
// @ RAW: #[allow(unused)] use [<__ $db _model>]::*;
}

@@ -337,7 +337,7 @@ macro_rules! db_object {
};

( @db $db:ident | $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => {
paste::paste! {
pastey::paste! {
#[allow(unused)] use super::*;
#[allow(unused)] use diesel::prelude::*;
#[allow(unused)] use $crate::db::[<__ $db _schema>]::*;
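Every `paste::paste!` call becomes `pastey::paste!`; `pastey` is a maintained fork that keeps the same `[<...>]` identifier-pasting syntax (drop-in compatibility is the assumption here). A tiny example of what the macro does:

    // Generates `fn get_color()` by pasting tokens inside [< ... >].
    macro_rules! make_getter {
        ($name:ident) => {
            pastey::paste! {
                fn [<get_ $name>]() -> &'static str {
                    stringify!($name)
                }
            }
        };
    }

    make_getter!(color);

    fn main() {
        assert_eq!(get_color(), "color");
        println!("{}", get_color());
    }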
@@ -1,11 +1,11 @@
use std::io::ErrorKind;
use std::time::Duration;

use bigdecimal::{BigDecimal, ToPrimitive};
use derive_more::{AsRef, Deref, Display};
use serde_json::Value;

use super::{CipherId, OrganizationId, UserId};
use crate::CONFIG;
use crate::{config::PathType, CONFIG};
use macros::IdFromParam;

db_object! {

@@ -41,24 +41,30 @@ impl Attachment {
}

pub fn get_file_path(&self) -> String {
format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
format!("{}/{}", self.cipher_uuid, self.id)
}

pub fn get_url(&self, host: &str) -> String {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token)
pub async fn get_url(&self, host: &str) -> Result<String, crate::Error> {
let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;

if operator.info().scheme() == opendal::Scheme::Fs {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
} else {
Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}

pub fn to_json(&self, host: &str) -> Value {
json!({
pub async fn to_json(&self, host: &str) -> Result<Value, crate::Error> {
Ok(json!({
"id": self.id,
"url": self.get_url(host),
"url": self.get_url(host).await?,
"fileName": self.file_name,
"size": self.file_size.to_string(),
"sizeName": crate::util::get_display_size(self.file_size),
"key": self.akey,
"object": "attachment"
})
}))
}
}

@@ -104,26 +110,26 @@ impl Attachment {

pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
let _: () = crate::util::retry(
crate::util::retry(
|| diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
10,
)
.map_res("Error deleting attachment")?;
.map(|_| ())
.map_res("Error deleting attachment")
}}?;

let file_path = &self.get_file_path();
let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
let file_path = self.get_file_path();

match std::fs::remove_file(file_path) {
// Ignore "file not found" errors. This can happen when the
// upstream caller has already cleaned up the file as part of
// its own error handling.
Err(e) if e.kind() == ErrorKind::NotFound => {
debug!("File '{}' already deleted.", file_path);
Ok(())
}
Err(e) => Err(e.into()),
_ => Ok(()),
if let Err(e) = operator.delete(&file_path).await {
if e.kind() == opendal::ErrorKind::NotFound {
debug!("File '{file_path}' already deleted.");
} else {
return Err(e.into());
}
}}
}

Ok(())
}

pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
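`to_json` becoming async has a knock-on effect visible in the cipher hunks below: `iter().map(|c| c.to_json(host)).collect()` no longer works, because `.await` and `?` can't run inside a plain closure, so the collection is built with an explicit loop. A minimal illustration of the pattern (tokio and the dummy `to_json` are stand-ins):

    // A stand-in for Attachment::to_json, which is now async and fallible.
    async fn to_json(id: u32) -> Result<String, String> {
        Ok(format!("{{\"id\": {id}}}"))
    }

    #[tokio::main]
    async fn main() -> Result<(), String> {
        let attachments = vec![1, 2, 3];

        // Not possible: attachments.iter().map(|a| to_json(*a)).collect()
        // would collect futures, not values, and `?` can't run inside map().
        let mut json_vec = Vec::with_capacity(attachments.len());
        for a in &attachments {
            json_vec.push(to_json(*a).await?); // await each conversion in turn
        }

        println!("{json_vec:?}");
        Ok(())
    }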
@@ -16,7 +16,7 @@ db_object! {
pub organization_uuid: Option<OrganizationId>,

pub request_device_identifier: DeviceId,
pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
pub device_type: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs

pub request_ip: String,
pub response_device_id: Option<DeviceId>,
@@ -85,7 +85,7 @@ impl Cipher {
let mut validation_errors = serde_json::Map::new();
let max_note_size = CONFIG._max_note_size();
let max_note_size_msg =
format!("The field Notes exceeds the maximum encrypted value length of {} characters.", &max_note_size);
format!("The field Notes exceeds the maximum encrypted value length of {max_note_size} characters.");
for (index, cipher) in cipher_data.iter().enumerate() {
// Validate the note size and, if it is exceeded, return a warning
if let Some(note) = &cipher.notes {

@@ -141,18 +141,28 @@ impl Cipher {
cipher_sync_data: Option<&CipherSyncData>,
sync_type: CipherSyncType,
conn: &mut DbConn,
) -> Value {
) -> Result<Value, crate::Error> {
use crate::util::{format_date, validate_and_format_date};

let mut attachments_json: Value = Value::Null;
if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
if !attachments.is_empty() {
let mut attachments_json_vec = vec![];
for attachment in attachments {
attachments_json_vec.push(attachment.to_json(host).await?);
}
attachments_json = Value::Array(attachments_json_vec);
}
}
} else {
let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
if !attachments.is_empty() {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
let mut attachments_json_vec = vec![];
for attachment in attachments {
attachments_json_vec.push(attachment.to_json(host).await?);
}
attachments_json = Value::Array(attachments_json_vec);
}
}

@@ -318,7 +328,7 @@ impl Cipher {
// supports the "cipherDetails" type, though it seems like the
// Bitwarden clients will ignore extra fields.
//
// Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
// Ref: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Vault/Models/Response/CipherResponseModel.cs#L14
let mut json_object = json!({
"object": "cipherDetails",
"id": self.uuid,

@@ -372,6 +382,11 @@ impl Cipher {
// the "Read Only" or "Hide Passwords" restrictions for the user.
json_object["edit"] = json!(!read_only);
json_object["viewPassword"] = json!(!hide_passwords);
// The new key used by clients since v2025.6.0
json_object["permissions"] = json!({
"delete": !read_only,
"restore": !read_only,
});
}

let key = match self.atype {

@@ -384,7 +399,7 @@ impl Cipher {
};

json_object[key] = type_data_json;
json_object
Ok(json_object)
}

pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {

@@ -594,22 +609,23 @@ impl Cipher {
let mut rows: Vec<(bool, bool, bool)> = Vec::new();
if let Some(collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
for collection in collections {
//User permissions
// User permissions
if let Some(cu) = cipher_sync_data.user_collections.get(collection) {
rows.push((cu.read_only, cu.hide_passwords, cu.manage));
}

//Group permissions
if let Some(cg) = cipher_sync_data.user_collections_groups.get(collection) {
// Group permissions
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(collection) {
rows.push((cg.read_only, cg.hide_passwords, cg.manage));
}
}
}
rows
} else {
let mut access_flags = self.get_user_collections_access_flags(user_uuid, conn).await;
access_flags.append(&mut self.get_group_collections_access_flags(user_uuid, conn).await);
access_flags
let user_permissions = self.get_user_collections_access_flags(user_uuid, conn).await;
if !user_permissions.is_empty() {
user_permissions
} else {
self.get_group_collections_access_flags(user_uuid, conn).await
}
};

if rows.is_empty() {

@@ -618,6 +634,9 @@ impl Cipher {
}

// A cipher can be in multiple collections with inconsistent access flags.
// Also, user permissions overrule group permissions
// and only user permissions are returned by the code above.
//
// For example, a cipher could be in one collection where the user has
// read-only access, but also in another collection where the user has
// read/write access. For a flag to be in effect for a cipher, upstream

@@ -626,13 +645,15 @@ impl Cipher {
// and `hide_passwords` columns. This could ideally be done as part of the
// query, but Diesel doesn't support a min() or bool_and() function on
// booleans and this behavior isn't portable anyway.
//
// The only exception is the `manage` flag, which needs a boolean OR!
let mut read_only = true;
let mut hide_passwords = true;
let mut manage = false;
for (ro, hp, mn) in rows.iter() {
read_only &= ro;
hide_passwords &= hp;
manage &= mn;
manage |= mn;
}

Some((read_only, hide_passwords, manage))
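The fold at the end is the heart of the fix: restrictive flags (`read_only`, `hide_passwords`) must hold in every collection granting access, so they combine with AND, while `manage` is a grant and combines with OR; the old `manage &= mn` meant one non-managing collection silently revoked manage everywhere. A standalone sketch:

    /// Combine (read_only, hide_passwords, manage) flags across all collections
    /// through which a user can access a cipher.
    fn combine(rows: &[(bool, bool, bool)]) -> Option<(bool, bool, bool)> {
        if rows.is_empty() {
            return None;
        }
        let mut read_only = true;
        let mut hide_passwords = true;
        let mut manage = false;
        for &(ro, hp, mn) in rows {
            read_only &= ro;      // restriction: must be read-only everywhere
            hide_passwords &= hp; // restriction: must be hidden everywhere
            manage |= mn;         // grant: managing via any collection suffices
        }
        Some((read_only, hide_passwords, manage))
    }

    fn main() {
        // Read-only in one collection, read/write + manage in another:
        let rows = [(true, true, false), (false, false, true)];
        assert_eq!(combine(&rows), Some((false, false, true)));
    }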
@@ -11,6 +11,7 @@ use macros::UuidFromParam;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = collections)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
pub struct Collection {
pub uuid: CollectionId,
@@ -96,13 +97,13 @@ impl Collection {
(
cu.read_only,
cu.hide_passwords,
cu.manage || (is_manager && !cu.read_only && !cu.hide_passwords),
is_manager && (cu.manage || (!cu.read_only && !cu.hide_passwords)),
)
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
(
cg.read_only,
cg.hide_passwords,
cg.manage || (is_manager && !cg.read_only && !cg.hide_passwords),
is_manager && (cg.manage || (!cg.read_only && !cg.hide_passwords)),
)
} else {
(false, false, false)
@@ -113,7 +114,9 @@ impl Collection {
} else {
match Membership::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
Some(_) if self.is_manageable_by_user(user_uuid, conn).await => (false, false, true),
Some(m) if m.atype == MembershipType::Manager && self.is_manageable_by_user(user_uuid, conn).await => {
(false, false, true)
}
Some(m) => {
let is_manager = m.atype == MembershipType::Manager;
let read_only = !self.is_writable_by_user(user_uuid, conn).await;
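The change to the two manage expressions above is subtle, so here is a hedged before/after sketch with plain booleans standing in for the CollectionUser and CollectionGroup fields: the old rule granted manage whenever the stored flag was set, while the new rule additionally requires Manager membership.

    // Illustrative only; mirrors the two expressions in the hunk above.
    fn manage_old(is_manager: bool, manage: bool, read_only: bool, hide_passwords: bool) -> bool {
        manage || (is_manager && !read_only && !hide_passwords)
    }

    fn manage_new(is_manager: bool, manage: bool, read_only: bool, hide_passwords: bool) -> bool {
        is_manager && (manage || (!read_only && !hide_passwords))
    }

    fn main() {
        // A non-manager with an explicit manage flag: granted before, denied now.
        assert!(manage_old(false, true, false, false));
        assert!(!manage_new(false, true, false, false));
    }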
@@ -3,8 +3,12 @@ use derive_more::{Display, From};
use serde_json::Value;

use super::{AuthRequest, UserId};
use crate::{crypto, util::format_date, CONFIG};
use macros::IdFromParam;
use crate::{
crypto,
util::{format_date, get_uuid},
CONFIG,
};
use macros::{IdFromParam, UuidFromParam};

db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -19,8 +23,8 @@ db_object! {
pub user_uuid: UserId,

pub name: String,
pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs
pub push_uuid: Option<String>,
pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
pub push_uuid: Option<PushId>,
pub push_token: Option<String>,

pub refresh_token: String,
@@ -42,7 +46,7 @@ impl Device {
name,
atype,

push_uuid: None,
push_uuid: Some(PushId(get_uuid())),
push_token: None,
refresh_token: String::new(),
twofactor_remember: None,
@@ -54,7 +58,7 @@ impl Device {
"id": self.uuid,
"name": self.name,
"type": self.atype,
"identifier": self.push_uuid,
"identifier": self.uuid,
"creationDate": format_date(&self.created_at),
"isTrusted": false,
"object":"device"
@@ -73,7 +77,12 @@ impl Device {
self.twofactor_remember = None;
}

pub fn refresh_tokens(&mut self, user: &super::User, scope: Vec<String>) -> (String, i64) {
pub fn refresh_tokens(
&mut self,
user: &super::User,
scope: Vec<String>,
client_id: Option<String>,
) -> (String, i64) {
// If there is no refresh token, we create one
if self.refresh_token.is_empty() {
use data_encoding::BASE64URL;
@@ -84,6 +93,11 @@ impl Device {
let time_now = Utc::now();
self.updated_at = time_now.naive_utc();

// Generate a random push_uuid if it doesn't already have one
if self.push_uuid.is_none() {
self.push_uuid = Some(PushId(get_uuid()));
}

// ---
// Disabled adding these keys to the JWT since they could cause the JWT to get too large
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
@@ -121,6 +135,8 @@ impl Device {
// orgmanager,
sstamp: user.security_stamp.clone(),
device: self.uuid.clone(),
devicetype: DeviceType::from_i32(self.atype).to_string(),
client_id: client_id.unwrap_or("undefined".to_string()),
scope,
amr: vec!["Application".into()],
};
@@ -132,8 +148,8 @@ impl Device {
matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
}

pub fn is_registered(&self) -> bool {
self.push_uuid.is_some()
pub fn is_cli(&self) -> bool {
matches!(DeviceType::from_i32(self.atype), DeviceType::WindowsCLI | DeviceType::MacOsCLI | DeviceType::LinuxCLI)
}
}

@@ -152,10 +168,12 @@ impl DeviceWithAuthRequest {
"id": self.device.uuid,
"name": self.device.name,
"type": self.device.atype,
"identifier": self.device.push_uuid,
"identifier": self.device.uuid,
"creationDate": format_date(&self.device.created_at),
"devicePendingAuthRequest": auth_request,
"isTrusted": false,
"encryptedPublicKey": null,
"encryptedUserKey": null,
"object": "device",
})
}
@@ -391,3 +409,6 @@ impl DeviceType {
Clone, Debug, DieselNewType, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
)]
pub struct DeviceId(String);

#[derive(Clone, Debug, DieselNewType, Display, From, FromForm, Serialize, Deserialize, UuidFromParam)]
pub struct PushId(pub String);
@@ -78,6 +78,7 @@ impl EmergencyAccess {
"grantorId": grantor_user.uuid,
"email": grantor_user.email,
"name": grantor_user.name,
"avatarColor": grantor_user.avatar_color,
"object": "emergencyAccessGrantorDetails",
})
}
@@ -106,6 +107,7 @@ impl EmergencyAccess {
"granteeId": grantee_user.uuid,
"email": grantee_user.email,
"name": grantee_user.name,
"avatarColor": grantee_user.avatar_color,
"object": "emergencyAccessGranteeDetails",
}))
}
@@ -8,11 +8,12 @@ use crate::{api::EmptyResult, db::DbConn, error::MapResult, CONFIG};
// https://bitwarden.com/help/event-logs/

db_object! {
// Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
// Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Api/Models/Public/Response/EventResponseModel.cs
// Upstream SQL: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Sql/dbo/Tables/Event.sql
// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs
// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Public/Models/Response/EventResponseModel.cs
// Upstream SQL: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Sql/dbo/Tables/Event.sql
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = event)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
pub struct Event {
pub uuid: EventId,
@@ -24,7 +25,7 @@ db_object! {
pub group_uuid: Option<GroupId>,
pub org_user_uuid: Option<MembershipId>,
pub act_user_uuid: Option<UserId>,
// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs
// Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
pub device_type: Option<i32>,
pub ip_address: Option<String>,
pub event_date: NaiveDateTime,
@@ -35,7 +36,7 @@ db_object! {
}
}

// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/EventType.cs
// Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/EventType.cs
#[derive(Debug, Copy, Clone)]
pub enum EventType {
// User
@@ -71,7 +72,6 @@ pub enum EventType {
CipherSoftDeleted = 1115,
CipherRestored = 1116,
CipherClientToggledCardNumberVisible = 1117,
CipherClientToggledTOTPSeedVisible = 1118,

// Collection
CollectionCreated = 1300,
@@ -87,7 +87,7 @@ pub enum EventType {
OrganizationUserInvited = 1500,
OrganizationUserConfirmed = 1501,
OrganizationUserUpdated = 1502,
OrganizationUserRemoved = 1503,
OrganizationUserRemoved = 1503, // Organization user data was deleted
OrganizationUserUpdatedGroups = 1504,
// OrganizationUserUnlinkedSso = 1505, // Not supported
OrganizationUserResetPasswordEnroll = 1506,
@@ -99,8 +99,8 @@ pub enum EventType {
OrganizationUserRestored = 1512,
OrganizationUserApprovedAuthRequest = 1513,
OrganizationUserRejectedAuthRequest = 1514,
OrganizationUserDeleted = 1515,
OrganizationUserLeft = 1516,
OrganizationUserDeleted = 1515, // Both user and organization user data were deleted
OrganizationUserLeft = 1516, // User voluntarily left the organization

// Organization
OrganizationUpdated = 1600,
@@ -187,7 +187,7 @@ impl Event {
}

/// Database methods
/// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
/// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs
impl Event {
pub const PAGE_SIZE: i64 = 30;
@@ -10,6 +10,7 @@ use serde_json::Value;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = groups)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
pub struct Group {
pub uuid: GroupId,
@@ -67,16 +68,11 @@ impl Group {
}

pub fn to_json(&self) -> Value {
use crate::util::format_date;

json!({
"id": self.uuid,
"organizationId": self.organizations_uuid,
"name": self.name,
"accessAll": self.access_all,
"externalId": self.external_id,
"creationDate": format_date(&self.creation_date),
"revisionDate": format_date(&self.revision_date),
"object": "group"
})
}
@@ -297,7 +293,7 @@ impl Group {

pub async fn update_revision(uuid: &GroupId, conn: &mut DbConn) {
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
warn!("Failed to update revision for {}: {:#?}", uuid, e);
warn!("Failed to update revision for {uuid}: {e:#?}");
}
}

@@ -20,7 +20,7 @@ pub use self::attachment::{Attachment, AttachmentId};
pub use self::auth_request::{AuthRequest, AuthRequestId};
pub use self::cipher::{Cipher, CipherId, RepromptType};
pub use self::collection::{Collection, CollectionCipher, CollectionId, CollectionUser};
pub use self::device::{Device, DeviceId, DeviceType};
pub use self::device::{Device, DeviceId, DeviceType, PushId};
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessId, EmergencyAccessStatus, EmergencyAccessType};
pub use self::event::{Event, EventType};
pub use self::favorite::Favorite;
@@ -21,7 +21,7 @@ db_object! {
}
}

// https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/PolicyType.cs
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/PolicyType.cs
#[derive(Copy, Clone, Eq, PartialEq, num_derive::FromPrimitive)]
pub enum OrgPolicyType {
TwoFactorAuthentication = 0,
@@ -35,9 +35,13 @@ pub enum OrgPolicyType {
ResetPassword = 8,
// MaximumVaultTimeout = 9, // Not supported (Not AGPLv3 Licensed)
// DisablePersonalVaultExport = 10, // Not supported (Not AGPLv3 Licensed)
// ActivateAutofill = 11,
// AutomaticAppLogIn = 12,
// FreeFamiliesSponsorshipPolicy = 13,
RemoveUnlockWithPin = 14,
}

// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs#L5
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SendOptionsPolicyData {
@@ -45,7 +49,7 @@ pub struct SendOptionsPolicyData {
pub disable_hide_email: bool,
}

// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ResetPasswordDataModel {
@@ -79,14 +83,24 @@ impl OrgPolicy {

pub fn to_json(&self) -> Value {
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
json!({
let mut policy = json!({
"id": self.uuid,
"organizationId": self.org_uuid,
"type": self.atype,
"data": data_json,
"enabled": self.enabled,
"object": "policy",
})
});

// Upstream adds this key/value
// Allow enabling Single Org policy when the organization has claimed domains.
// See: (https://github.com/bitwarden/server/pull/5565)
// We return the same to prevent possible issues
if self.atype == 8i32 {
policy["canToggleState"] = json!(true);
}

policy
}
}

@@ -197,7 +211,7 @@ impl OrgPolicy {
pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
user_uuid: &UserId,
policy_type: OrgPolicyType,
conn: &mut DbConn,
conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
org_policies::table
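As a quick illustration of the new to_json flow above, the following standalone sketch (placeholder UUIDs, serde_json only) builds the base policy object and then appends canToggleState for the ResetPassword policy, atype 8, the same way the hunk does.

    use serde_json::{json, Value};

    fn policy_to_json(atype: i32, enabled: bool) -> Value {
        let mut policy = json!({
            "id": "00000000-0000-0000-0000-000000000000", // placeholder uuid
            "organizationId": "00000000-0000-0000-0000-000000000000",
            "type": atype,
            "data": Value::Null,
            "enabled": enabled,
            "object": "policy",
        });
        // Mirrors the hunk: only the ResetPassword policy gets the extra key.
        if atype == 8 {
            policy["canToggleState"] = json!(true);
        }
        policy
    }

    fn main() {
        println!("{}", policy_to_json(8, true));
    }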
@@ -17,6 +17,7 @@ use macros::UuidFromParam;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = organizations)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
pub struct Organization {
pub uuid: OrganizationId,
@@ -28,6 +29,7 @@ db_object! {

#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = users_organizations)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
pub struct Membership {
pub uuid: MembershipId,
@@ -54,7 +56,7 @@ db_object! {
}
}

// https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/OrganizationUserStatusType.cs
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/OrganizationUserStatusType.cs
#[derive(PartialEq)]
pub enum MembershipStatus {
Revoked = -1,
@@ -175,7 +177,7 @@ impl Organization {
public_key,
}
}
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Models/Response/Organizations/OrganizationResponseModel.cs
pub fn to_json(&self) -> Value {
json!({
"id": self.uuid,
@@ -201,7 +203,6 @@ impl Organization {
"useResetPassword": CONFIG.mail_enabled(),
"allowAdminAccessToAllCollectionItems": true,
"limitCollectionCreation": true,
"limitCollectionCreationDeletion": true,
"limitCollectionDeletion": true,

"businessName": self.name,
@@ -422,7 +423,7 @@ impl Membership {
"manageScim": false // Not supported (Not AGPLv3 Licensed)
});

// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
// https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Models/Response/ProfileOrganizationResponseModel.cs
json!({
"id": self.org_uuid,
"identifier": null, // Not supported
@@ -449,6 +450,8 @@ impl Membership {
"usePasswordManager": true,
"useCustomPermissions": true,
"useActivateAutofillPolicy": false,
"useAdminSponsoredFamilies": false,
"useRiskInsights": false, // Not supported (Not AGPLv3 Licensed)

"organizationUserId": self.uuid,
"providerId": null,
@@ -456,7 +459,6 @@ impl Membership {
"providerType": null,
"familySponsorshipFriendlyName": null,
"familySponsorshipAvailable": false,
"planProductType": 3,
"productTierType": 3, // Enterprise tier
"keyConnectorEnabled": false,
"keyConnectorUrl": null,
@@ -465,10 +467,11 @@ impl Membership {
"familySponsorshipToDelete": null,
"accessSecretsManager": false,
"limitCollectionCreation": self.atype < MembershipType::Manager, // If less than a manager return true, to limit collection creations
"limitCollectionCreationDeletion": true,
"limitCollectionDeletion": true,
"limitItemDeletion": false,
"allowAdminAccessToAllCollectionItems": true,
"userIsManagedByOrganization": false, // Means not managed via the Members UI, like SSO
"userIsClaimedByOrganization": false, // The new key instead of the obsolete userIsManagedByOrganization

"permissions": permissions,

@@ -517,7 +520,7 @@ impl Membership {
CONFIG.org_groups_enabled() && Group::is_in_full_access_group(&self.user_uuid, &self.org_uuid, conn).await;

// If collections are to be included, only include them if the user does not have full access via a group or defined on the user itself
let collections: Vec<Value> = if include_collections && !(full_access_group || self.has_full_access()) {
let collections: Vec<Value> = if include_collections && !(full_access_group || self.access_all) {
// Get all collections for the user here already to prevent more queries
let cu: HashMap<CollectionId, CollectionUser> =
CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
@@ -614,6 +617,8 @@ impl Membership {
"permissions": permissions,

"ssoBound": false, // Not supported
"managedByOrganization": false, // This key is obsolete, replaced by claimedByOrganization
"claimedByOrganization": false, // Means not managed via the Members UI, like SSO
"usesKeyConnector": false, // Not supported
"accessSecretsManager": false, // Not supported (Not AGPLv3 Licensed)

@@ -861,6 +866,21 @@ impl Membership {
}}
}

// Get all users which are either owner or admin, or a manager who can manage/access all
pub async fn find_confirmed_and_manage_all_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::org_uuid.eq(org_uuid))
.filter(users_organizations::status.eq(MembershipStatus::Confirmed as i32))
.filter(
users_organizations::atype.eq_any(vec![MembershipType::Owner as i32, MembershipType::Admin as i32])
.or(users_organizations::atype.eq(MembershipType::Manager as i32).and(users_organizations::access_all.eq(true)))
)
.load::<MembershipDb>(conn)
.unwrap_or_default().from_db()
}}
}

pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: {
users_organizations::table
@@ -1,7 +1,7 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;

use crate::util::LowerCase;
use crate::{config::PathType, util::LowerCase, CONFIG};

use super::{OrganizationId, User, UserId};
use id::SendId;
@@ -226,7 +226,8 @@ impl Send {
self.update_users_revision(conn).await;

if self.atype == SendType::File as i32 {
std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
operator.remove_all(&self.uuid).await.ok();
}

db_run! { conn: {
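The hunk above replaces a direct std::fs::remove_dir_all with an OpenDAL operator, so the same deletion works whether Sends live on local disk or in S3. A minimal sketch of the idea, assuming a local Fs backend and root path in place of whatever opendal_operator_for_path_type() actually returns:

    // Hedged sketch; the "data/sends" root and the Fs backend are assumptions.
    use opendal::{services::Fs, Operator};

    async fn remove_send_files(send_uuid: &str) -> opendal::Result<()> {
        let builder = Fs::default().root("data/sends");
        let operator = Operator::new(builder)?.finish();
        // remove_all deletes the given path recursively, like remove_dir_all did.
        operator.remove_all(send_uuid).await
    }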
@@ -173,8 +173,8 @@ impl User {
/// * `password` - A str which contains a hashed version of the user's master password.
/// * `new_key` - A String which contains the new aKey value of the user's master password.
/// * `allow_next_route` - An Option<Vec<String>> with the function names of the next allowed (rocket) routes.
/// These routes are able to use the previous stamp id for the next 2 minutes.
/// After these 2 minutes this stamp will expire.
/// These routes are able to use the previous stamp id for the next 2 minutes.
/// After these 2 minutes this stamp will expire.
///
pub fn set_password(
&mut self,
@@ -206,8 +206,8 @@ impl User {
///
/// # Arguments
/// * `route_exception` - A Vec<String> with the function names of the next allowed (rocket) routes.
/// These routes are able to use the previous stamp id for the next 2 minutes.
/// After these 2 minutes this stamp will expire.
/// These routes are able to use the previous stamp id for the next 2 minutes.
/// After these 2 minutes this stamp will expire.
///
pub fn set_stamp_exception(&mut self, route_exception: Vec<String>) {
let stamp_exception = UserStampException {
@@ -249,7 +249,6 @@ impl User {
"emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"premium": true,
"premiumFromOrganization": false,
"masterPasswordHint": self.password_hint,
"culture": "en-US",
"twoFactorEnabled": twofactor_enabled,
"key": self.akey,
@@ -334,7 +333,7 @@ impl User {

pub async fn update_uuid_revision(uuid: &UserId, conn: &mut DbConn) {
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
warn!("Failed to update revision for {}: {:#?}", uuid, e);
warn!("Failed to update revision for {uuid}: {e:#?}");
}
}
src/error.rs (33 changes)
@@ -46,6 +46,7 @@ use jsonwebtoken::errors::Error as JwtErr;
use lettre::address::AddressError as AddrErr;
use lettre::error::Error as LettreErr;
use lettre::transport::smtp::Error as SmtpErr;
use opendal::Error as OpenDALErr;
use openssl::error::ErrorStack as SSLErr;
use regex::Error as RegexErr;
use reqwest::Error as ReqErr;
@@ -59,6 +60,8 @@ use yubico::yubicoerror::YubicoError as YubiErr;
#[derive(Serialize)]
pub struct Empty {}

pub struct Compact {}

// Error struct
// Contains a String error message meant for the user, and an enum variant with an error of different types.
//
@@ -69,6 +72,7 @@ make_error! {
Empty(Empty): _no_source, _serialize,
// Used to represent err! calls
Simple(String): _no_source, _api_error,
Compact(Compact): _no_source, _api_error_small,

// Used in our custom http client to handle non-global IPs and blocked domains
CustomHttpClient(CustomHttpClientError): _has_source, _api_error,
@@ -95,6 +99,8 @@ make_error! {

DieselCon(DieselConErr): _has_source, _api_error,
Webauthn(WebauthnErr): _has_source, _api_error,

OpenDAL(OpenDALErr): _has_source, _api_error,
}

impl std::fmt::Debug for Error {
@@ -132,6 +138,12 @@ impl Error {
self
}

#[must_use]
pub fn with_kind(mut self, kind: ErrorKind) -> Self {
self.error = kind;
self
}

#[must_use]
pub const fn with_code(mut self, code: u16) -> Self {
self.error_code = code;
@@ -200,6 +212,18 @@ fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
_serialize(&json, "")
}

fn _api_error_small(_: &impl std::any::Any, msg: &str) -> String {
let json = json!({
"message": msg,
"validationErrors": null,
"exceptionMessage": null,
"exceptionStackTrace": null,
"innerExceptionMessage": null,
"object": "error"
});
_serialize(&json, "")
}

//
// Rocket responder impl
//
@@ -212,9 +236,8 @@ use rocket::response::{self, Responder, Response};
impl Responder<'_, 'static> for Error {
fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
match self.error {
ErrorKind::Empty(_) => {} // Don't print the error in this situation
ErrorKind::Simple(_) => {} // Don't print the error in this situation
_ => error!(target: "error", "{:#?}", self),
ErrorKind::Empty(_) | ErrorKind::Simple(_) | ErrorKind::Compact(_) => {} // Don't print the error in this situation
_ => error!(target: "error", "{self:#?}"),
};

let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest);
@@ -228,6 +251,10 @@ impl Responder<'_, 'static> for Error {
//
#[macro_export]
macro_rules! err {
($kind:ident, $msg:expr) => {{
error!("{}", $msg);
return Err($crate::error::Error::new($msg, $msg).with_kind($crate::error::ErrorKind::$kind($crate::error::$kind {})));
}};
($msg:expr) => {{
error!("{}", $msg);
return Err($crate::error::Error::new($msg, $msg));
@@ -6,7 +6,7 @@ use std::{
time::Duration,
};

use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver};
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::{
@@ -173,7 +173,7 @@ impl std::error::Error for CustomHttpClientError {}
#[derive(Debug, Clone)]
enum CustomDnsResolver {
Default(),
Hickory(Arc<TokioAsyncResolver>),
Hickory(Arc<TokioResolver>),
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;

@@ -184,9 +184,9 @@ impl CustomDnsResolver {
}

fn new() -> Arc<Self> {
match read_system_conf() {
Ok((config, opts)) => {
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
match TokioResolver::builder(TokioConnectionProvider::default()) {
Ok(builder) => {
let resolver = builder.build();
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
@@ -244,3 +244,61 @@ impl Resolve for CustomDnsResolver {
})
}
}

#[cfg(s3)]
pub(crate) mod aws {
use aws_smithy_runtime_api::client::{
http::{HttpClient, HttpConnector, HttpConnectorFuture, HttpConnectorSettings, SharedHttpConnector},
orchestrator::HttpResponse,
result::ConnectorError,
runtime_components::RuntimeComponents,
};
use reqwest::Client;

// Adapter that wraps reqwest to be compatible with the AWS SDK
#[derive(Debug)]
pub(crate) struct AwsReqwestConnector {
pub(crate) client: Client,
}

impl HttpConnector for AwsReqwestConnector {
fn call(&self, request: aws_smithy_runtime_api::client::orchestrator::HttpRequest) -> HttpConnectorFuture {
// Convert the AWS-style request to a reqwest request
let client = self.client.clone();
let future = async move {
let method = reqwest::Method::from_bytes(request.method().as_bytes())
.map_err(|e| ConnectorError::user(Box::new(e)))?;
let mut req_builder = client.request(method, request.uri().to_string());

for (name, value) in request.headers() {
req_builder = req_builder.header(name, value);
}

if let Some(body_bytes) = request.body().bytes() {
req_builder = req_builder.body(body_bytes.to_vec());
}

let response = req_builder.send().await.map_err(|e| ConnectorError::io(Box::new(e)))?;

let status = response.status().into();
let bytes = response.bytes().await.map_err(|e| ConnectorError::io(Box::new(e)))?;

Ok(HttpResponse::new(status, bytes.into()))
};

HttpConnectorFuture::new(Box::pin(future))
}
}

impl HttpClient for AwsReqwestConnector {
fn http_connector(
&self,
_settings: &HttpConnectorSettings,
_components: &RuntimeComponents,
) -> SharedHttpConnector {
SharedHttpConnector::new(AwsReqwestConnector {
client: self.client.clone(),
})
}
}
}
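For context, a connector like AwsReqwestConnector is typically handed to the SDK through the shared config loader. The wiring below is an assumption (aws-config's ConfigLoader::http_client plus the aws-sdk-s3 crate, which the s3 feature presumably pulls in), not code from this changeset:

    use aws_config::BehaviorVersion;
    use aws_smithy_runtime_api::client::http::HttpClient;

    // Hypothetical wiring: plug any HttpClient implementation (such as the
    // AwsReqwestConnector above) into the SDK-wide configuration.
    async fn s3_client_with(http: impl HttpClient + 'static) -> aws_sdk_s3::Client {
        let conf = aws_config::defaults(BehaviorVersion::latest())
            .http_client(http)
            .load()
            .await;
        aws_sdk_s3::Client::new(&conf)
    }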
src/mail.rs (57 changes)
@@ -85,7 +85,7 @@ fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> {
smtp_client.authentication(selected_mechanisms)
} else {
// Only show a warning, and return without setting an actual authentication mechanism
warn!("No valid SMTP Auth mechanism found for '{}', using default values", mechanism);
warn!("No valid SMTP Auth mechanism found for '{mechanism}', using default values");
smtp_client
}
}
@@ -201,6 +201,27 @@ pub async fn send_verify_email(address: &str, user_id: &UserId) -> EmptyResult {
send_email(address, &subject, body_html, body_text).await
}

pub async fn send_register_verify_email(email: &str, token: &str) -> EmptyResult {
let mut query = url::Url::parse("https://query.builder").unwrap();
query.query_pairs_mut().append_pair("email", email).append_pair("token", token);
let query_string = match query.query() {
None => err!("Failed to build verify URL query parameters"),
Some(query) => query,
};

let (subject, body_html, body_text) = get_text(
"email/register_verify_email",
json!({
// `url.Url` would place the anchor `#` after the query parameters
"url": format!("{}/#/finish-signup/?{query_string}", CONFIG.domain()),
"img_src": CONFIG._smtp_img_src(),
"email": email,
}),
)?;

send_email(email, &subject, body_html, body_text).await
}

pub async fn send_welcome(address: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/welcome",
@@ -293,7 +314,7 @@ pub async fn send_invite(
"email/send_org_invite",
json!({
// `url.Url` would place the anchor `#` after the query parameters
"url": format!("{}/#/accept-organization/?{}", CONFIG.domain(), query_string),
"url": format!("{}/#/accept-organization/?{query_string}", CONFIG.domain()),
"img_src": CONFIG._smtp_img_src(),
"org_name": org_name,
}),
@@ -549,6 +570,20 @@ pub async fn send_change_email(address: &str, token: &str) -> EmptyResult {
send_email(address, &subject, body_html, body_text).await
}

pub async fn send_change_email_existing(address: &str, acting_address: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/change_email_existing",
json!({
"url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(),
"existing_address": address,
"acting_address": acting_address,
}),
)?;

send_email(address, &subject, body_html, body_text).await
}

pub async fn send_test(address: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/smtp_test",
@@ -594,13 +629,13 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult {
// Match some common errors and make them more user friendly
Err(e) => {
if e.is_client() {
debug!("Sendmail client error: {:?}", e);
debug!("Sendmail client error: {e:?}");
err!(format!("Sendmail client error: {e}"));
} else if e.is_response() {
debug!("Sendmail response error: {:?}", e);
debug!("Sendmail response error: {e:?}");
err!(format!("Sendmail response error: {e}"));
} else {
debug!("Sendmail error: {:?}", e);
debug!("Sendmail error: {e:?}");
err!(format!("Sendmail error: {e}"));
}
}
@@ -611,13 +646,13 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult {
// Match some common errors and make them more user friendly
Err(e) => {
if e.is_client() {
debug!("SMTP client error: {:#?}", e);
debug!("SMTP client error: {e:#?}");
err!(format!("SMTP client error: {e}"));
} else if e.is_transient() {
debug!("SMTP 4xx error: {:#?}", e);
debug!("SMTP 4xx error: {e:#?}");
err!(format!("SMTP 4xx error: {e}"));
} else if e.is_permanent() {
debug!("SMTP 5xx error: {:#?}", e);
debug!("SMTP 5xx error: {e:#?}");
let mut msg = e.to_string();
// Add a special check for 535 to add a more descriptive message
if msg.contains("(535)") {
@@ -625,13 +660,13 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult {
}
err!(format!("SMTP 5xx error: {msg}"));
} else if e.is_timeout() {
debug!("SMTP timeout error: {:#?}", e);
debug!("SMTP timeout error: {e:#?}");
err!(format!("SMTP timeout error: {e}"));
} else if e.is_tls() {
debug!("SMTP encryption error: {:#?}", e);
debug!("SMTP encryption error: {e:#?}");
err!(format!("SMTP encryption error: {e}"));
} else {
debug!("SMTP error: {:#?}", e);
debug!("SMTP error: {e:#?}");
err!(format!("SMTP error: {e}"));
}
}
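send_register_verify_email above uses a small trick worth calling out: url::Url would place the # anchor after any query parameters, so a throwaway base URL is used purely to percent-encode the pairs, and only its query string is kept. A hedged standalone sketch (the vault.example domain is made up):

    fn build_signup_query(email: &str, token: &str) -> Option<String> {
        // The base URL is never sent anywhere; it only hosts the query builder.
        let mut query = url::Url::parse("https://query.builder").ok()?;
        query.query_pairs_mut().append_pair("email", email).append_pair("token", token);
        query.query().map(str::to_owned)
    }

    fn main() {
        let qs = build_signup_query("a+b@example.com", "t0k3n").unwrap();
        // The fragment route is appended manually, ahead of the query string.
        println!("https://vault.example/#/finish-signup/?{qs}");
    }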
src/main.rs (38 changes)
@@ -61,7 +61,7 @@ mod util;
use crate::api::core::two_factor::duo_oidc::purge_duo_contexts;
use crate::api::purge_auth_requests;
use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
pub use config::CONFIG;
pub use config::{PathType, CONFIG};
pub use error::{Error, MapResult};
use rocket::data::{Limits, ToByteUnit};
use std::sync::{atomic::Ordering, Arc};
@@ -75,16 +75,13 @@ async fn main() -> Result<(), Error> {
let level = init_logging()?;

check_data_folder().await;
auth::initialize_keys().unwrap_or_else(|e| {
auth::initialize_keys().await.unwrap_or_else(|e| {
error!("Error creating private key '{}'\n{e:?}\nExiting Vaultwarden!", CONFIG.private_rsa_key());
exit(1);
});
check_web_vault();

create_dir(&CONFIG.icon_cache_folder(), "icon cache");
create_dir(&CONFIG.tmp_folder(), "tmp folder");
create_dir(&CONFIG.sends_folder(), "sends folder");
create_dir(&CONFIG.attachments_folder(), "attachments folder");

let pool = create_db_pool().await;
schedule_jobs(pool.clone());
@@ -430,10 +427,7 @@ fn init_logging() -> Result<log::LevelFilter, Error> {
}
None => error!(
target: "panic",
"thread '{}' panicked at '{}'\n{:}",
thread,
msg,
backtrace
"thread '{thread}' panicked at '{msg}'\n{backtrace:}"
),
}
}));
@@ -453,7 +447,7 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
match syslog::unix(syslog_fmt) {
Ok(sl) => logger.chain(sl),
Err(e) => {
error!("Unable to connect to syslog: {:?}", e);
error!("Unable to connect to syslog: {e:?}");
logger
}
}
@@ -467,9 +461,27 @@ fn create_dir(path: &str, description: &str) {

async fn check_data_folder() {
let data_folder = &CONFIG.data_folder();

if data_folder.starts_with("s3://") {
if let Err(e) = CONFIG
.opendal_operator_for_path_type(PathType::Data)
.unwrap_or_else(|e| {
error!("Failed to create S3 operator for data folder '{data_folder}': {e:?}");
exit(1);
})
.check()
.await
{
error!("Could not access S3 data folder '{data_folder}': {e:?}");
exit(1);
}

return;
}

let path = Path::new(data_folder);
if !path.exists() {
error!("Data folder '{}' doesn't exist.", data_folder);
error!("Data folder '{data_folder}' doesn't exist.");
if is_running_in_container() {
error!("Verify that your data volume is mounted at the correct location.");
} else {
@@ -478,7 +490,7 @@ async fn check_data_folder() {
exit(1);
}
if !path.is_dir() {
error!("Data folder '{}' is not a directory.", data_folder);
error!("Data folder '{data_folder}' is not a directory.");
exit(1);
}

@@ -552,7 +564,7 @@ async fn create_db_pool() -> db::DbPool {
match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()).await {
Ok(p) => p,
Err(e) => {
error!("Error creating database pool: {:?}", e);
error!("Error creating database pool: {e:?}");
exit(1);
}
}
src/static/scripts/admin.css (vendored, 8 changes)
@@ -38,8 +38,8 @@ img {
max-width: 130px;
}
#users-table .vw-actions, #orgs-table .vw-actions {
min-width: 135px;
max-width: 140px;
min-width: 155px;
max-width: 160px;
}
#users-table .vw-org-cell {
max-height: 120px;
@@ -54,3 +54,7 @@ img {
.vw-copy-toast {
width: 15rem;
}

.abbr-badge {
cursor: help;
}
src/static/scripts/admin_diagnostics.js (vendored, 23 changes)
@@ -29,7 +29,7 @@ function isValidIp(ip) {
return ipv4Regex.test(ip) || ipv6Regex.test(ip);
}

function checkVersions(platform, installed, latest, commit=null) {
function checkVersions(platform, installed, latest, commit=null, pre_release=false) {
if (installed === "-" || latest === "-") {
document.getElementById(`${platform}-failed`).classList.remove("d-none");
return;
@@ -37,10 +37,12 @@ function checkVersions(platform, installed, latest, commit=null) {

// Only check basic versions, no commit revisions
if (commit === null || installed.indexOf("-") === -1) {
if (installed !== latest) {
document.getElementById(`${platform}-warning`).classList.remove("d-none");
} else {
if (platform === "web" && pre_release === true) {
document.getElementById(`${platform}-prerelease`).classList.remove("d-none");
} else if (installed == latest) {
document.getElementById(`${platform}-success`).classList.remove("d-none");
} else {
document.getElementById(`${platform}-warning`).classList.remove("d-none");
}
} else {
// Check if this is a branched version.
@@ -86,7 +88,7 @@ async function generateSupportString(event, dj) {
supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`;
supportString += `* Database type: ${dj.db_type}\n`;
supportString += `* Database version: ${dj.db_version}\n`;
supportString += `* Environment settings overridden!: ${dj.overrides !== ""}\n`;
supportString += `* Uses config.json: ${dj.overrides !== ""}\n`;
supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`;
if (dj.ip_header_exists) {
supportString += `* IP Header check: ${dj.ip_header_match} (${dj.ip_header_name})\n`;
@@ -94,6 +96,9 @@ async function generateSupportString(event, dj) {
supportString += `* Internet access: ${dj.has_http_access}\n`;
supportString += `* Internet access via a proxy: ${dj.uses_proxy}\n`;
supportString += `* DNS Check: ${dnsCheck}\n`;
if (dj.tz_env !== "") {
supportString += `* TZ environment: ${dj.tz_env}\n`;
}
supportString += `* Browser/Server Time Check: ${timeCheck}\n`;
supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`;
supportString += `* Domain Configuration Check: ${domainCheck}\n`;
@@ -203,11 +208,9 @@ function initVersionCheck(dj) {
}
checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);

if (!dj.running_within_container) {
const webInstalled = dj.web_vault_version;
const webLatest = dj.latest_web_build;
checkVersions("web", webInstalled, webLatest);
}
const webInstalled = dj.web_vault_version;
const webLatest = dj.latest_web_build;
checkVersions("web", webInstalled, webLatest, null, dj.web_vault_pre_release);
}

function checkDns(dns_resolved) {
src/static/scripts/bootstrap.bundle.js (vendored, 39 changes)
@@ -1,6 +1,6 @@
/*!
* Bootstrap v5.3.3 (https://getbootstrap.com/)
* Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
* Bootstrap v5.3.7 (https://getbootstrap.com/)
* Copyright 2011-2025 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
*/
(function (global, factory) {
@@ -205,7 +205,7 @@
* @param {HTMLElement} element
* @return void
*
* @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation
* @see https://www.harrytheo.com/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation
*/
const reflow = element => {
element.offsetHeight; // eslint-disable-line no-unused-expressions
@@ -250,7 +250,7 @@
});
};
const execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {
return typeof possibleCallback === 'function' ? possibleCallback(...args) : defaultValue;
return typeof possibleCallback === 'function' ? possibleCallback.call(...args) : defaultValue;
};
const executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {
if (!waitForTransition) {
@@ -572,7 +572,7 @@
const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));
for (const key of bsKeys) {
let pureKey = key.replace(/^bs/, '');
pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);
pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1);
attributes[pureKey] = normalizeData(element.dataset[key]);
}
return attributes;
@@ -647,7 +647,7 @@
* Constants
*/

const VERSION = '5.3.3';
const VERSION = '5.3.7';

/**
* Class definition
@@ -673,6 +673,8 @@
this[propertyName] = null;
}
}

// Private
_queueCallback(callback, element, isAnimated = true) {
executeAfterTransition(callback, element, isAnimated);
}
@@ -1604,11 +1606,11 @@
this._element.style[dimension] = '';
this._queueCallback(complete, this._element, true);
}

// Private
_isShown(element = this._element) {
return element.classList.contains(CLASS_NAME_SHOW$7);
}

// Private
_configAfterMerge(config) {
config.toggle = Boolean(config.toggle); // Coerce string values
config.parent = getElement(config.parent);
@@ -2666,7 +2668,6 @@
var popperOffsets = computeOffsets({
reference: referenceClientRect,
element: popperRect,
strategy: 'absolute',
placement: placement
});
var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));
@@ -2994,7 +2995,6 @@
state.modifiersData[name] = computeOffsets({
reference: state.rects.reference,
element: state.rects.popper,
strategy: 'absolute',
placement: state.placement
});
} // eslint-disable-next-line import/no-unused-modules
@@ -3690,6 +3690,9 @@
this._element.setAttribute('aria-expanded', 'false');
Manipulator.removeDataAttribute(this._menu, 'popper');
EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);

// Explicitly return focus to the trigger element
this._element.focus();
}
_getConfig(config) {
config = super._getConfig(config);
@@ -3701,7 +3704,7 @@
}
_createPopper() {
if (typeof Popper === 'undefined') {
throw new TypeError('Bootstrap\'s dropdowns require Popper (https://popper.js.org)');
throw new TypeError('Bootstrap\'s dropdowns require Popper (https://popper.js.org/docs/v2/)');
}
let referenceElement = this._element;
if (this._config.reference === 'parent') {
@@ -3780,7 +3783,7 @@
}
return {
...defaultBsPopperConfig,
...execute(this._config.popperConfig, [defaultBsPopperConfig])
...execute(this._config.popperConfig, [undefined, defaultBsPopperConfig])
};
}
_selectMenuItem({
@@ -4802,7 +4805,6 @@
*
* Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38
*/
// eslint-disable-next-line unicorn/better-regex
const SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;
const allowedAttribute = (attribute, allowedAttributeList) => {
const attributeName = attribute.nodeName.toLowerCase();
@@ -4967,7 +4969,7 @@
return this._config.sanitize ? sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;
}
_resolvePossibleFunction(arg) {
return execute(arg, [this]);
return execute(arg, [undefined, this]);
}
_putElementInTemplate(element, templateElement) {
if (this._config.html) {
@@ -5066,7 +5068,7 @@
class Tooltip extends BaseComponent {
constructor(element, config) {
if (typeof Popper === 'undefined') {
throw new TypeError('Bootstrap\'s tooltips require Popper (https://popper.js.org)');
throw new TypeError('Bootstrap\'s tooltips require Popper (https://popper.js.org/docs/v2/)');
}
super(element, config);

@@ -5112,7 +5114,6 @@
if (!this._isEnabled) {
return;
}
this._activeTrigger.click = !this._activeTrigger.click;
if (this._isShown()) {
this._leave();
return;
@@ -5300,7 +5301,7 @@
return offset;
}
_resolvePossibleFunction(arg) {
return execute(arg, [this._element]);
return execute(arg, [this._element, this._element]);
}
_getPopperConfig(attachment) {
const defaultBsPopperConfig = {
@@ -5338,7 +5339,7 @@
};
return {
...defaultBsPopperConfig,
...execute(this._config.popperConfig, [defaultBsPopperConfig])
...execute(this._config.popperConfig, [undefined, defaultBsPopperConfig])
};
}
_setListeners() {
@@ -5347,6 +5348,7 @@
if (trigger === 'click') {
EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {
const context = this._initializeOnDelegatedTarget(event);
context._activeTrigger[TRIGGER_CLICK] = !(context._isShown() && context._activeTrigger[TRIGGER_CLICK]);
context.toggle();
});
} else if (trigger !== TRIGGER_MANUAL) {
@@ -6212,7 +6214,6 @@
}

// Private

_maybeScheduleHide() {
if (!this._config.autohide) {
return;
src/static/scripts/bootstrap.css (vendored, 248 changes)
@@ -1,7 +1,7 @@
|
||||
@charset "UTF-8";
|
||||
/*!
|
||||
* Bootstrap v5.3.3 (https://getbootstrap.com/)
|
||||
* Copyright 2011-2024 The Bootstrap Authors
|
||||
* Bootstrap v5.3.7 (https://getbootstrap.com/)
|
||||
* Copyright 2011-2025 The Bootstrap Authors
|
||||
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
|
||||
*/
|
||||
:root,
|
||||
@@ -517,8 +517,8 @@ legend {
|
||||
width: 100%;
|
||||
padding: 0;
|
||||
margin-bottom: 0.5rem;
|
||||
font-size: calc(1.275rem + 0.3vw);
|
||||
line-height: inherit;
|
||||
font-size: calc(1.275rem + 0.3vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
legend {
|
||||
@@ -601,9 +601,9 @@ progress {
|
||||
}
|
||||
|
||||
.display-1 {
|
||||
font-size: calc(1.625rem + 4.5vw);
|
||||
font-weight: 300;
|
||||
line-height: 1.2;
|
||||
font-size: calc(1.625rem + 4.5vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.display-1 {
|
||||
@@ -612,9 +612,9 @@ progress {
|
||||
}
|
||||
|
||||
.display-2 {
|
||||
font-size: calc(1.575rem + 3.9vw);
|
||||
font-weight: 300;
|
||||
line-height: 1.2;
|
||||
font-size: calc(1.575rem + 3.9vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.display-2 {
|
||||
@@ -623,9 +623,9 @@ progress {
|
||||
}
|
||||
|
||||
.display-3 {
|
||||
font-size: calc(1.525rem + 3.3vw);
|
||||
font-weight: 300;
|
||||
line-height: 1.2;
|
||||
font-size: calc(1.525rem + 3.3vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.display-3 {
|
||||
@@ -634,9 +634,9 @@ progress {
|
||||
}
|
||||
|
||||
.display-4 {
|
||||
font-size: calc(1.475rem + 2.7vw);
|
||||
font-weight: 300;
|
||||
line-height: 1.2;
|
||||
font-size: calc(1.475rem + 2.7vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.display-4 {
|
||||
@@ -645,9 +645,9 @@ progress {
|
||||
}
|
||||
|
||||
.display-5 {
|
||||
font-size: calc(1.425rem + 2.1vw);
|
||||
font-weight: 300;
|
||||
line-height: 1.2;
|
||||
font-size: calc(1.425rem + 2.1vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.display-5 {
|
||||
@@ -656,9 +656,9 @@ progress {
|
||||
}
|
||||
|
||||
.display-6 {
|
||||
font-size: calc(1.375rem + 1.5vw);
|
||||
font-weight: 300;
|
||||
line-height: 1.2;
|
||||
font-size: calc(1.375rem + 1.5vw);
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.display-6 {
|
||||
@@ -803,7 +803,7 @@ progress {
|
||||
}
|
||||
|
||||
.col {
|
||||
flex: 1 0 0%;
|
||||
flex: 1 0 0;
|
||||
}
|
||||
|
||||
.row-cols-auto > * {
|
||||
@@ -1012,7 +1012,7 @@ progress {
|
||||
|
||||
@media (min-width: 576px) {
|
||||
.col-sm {
|
||||
flex: 1 0 0%;
|
||||
flex: 1 0 0;
|
||||
}
|
||||
.row-cols-sm-auto > * {
|
||||
flex: 0 0 auto;
|
||||
@@ -1181,7 +1181,7 @@ progress {
|
||||
}
|
||||
@media (min-width: 768px) {
|
||||
.col-md {
|
||||
flex: 1 0 0%;
|
||||
flex: 1 0 0;
|
||||
}
|
||||
.row-cols-md-auto > * {
|
||||
flex: 0 0 auto;
|
||||
@@ -1350,7 +1350,7 @@ progress {
|
||||
}
|
||||
@media (min-width: 992px) {
|
||||
.col-lg {
|
||||
flex: 1 0 0%;
|
||||
flex: 1 0 0;
|
||||
}
|
||||
.row-cols-lg-auto > * {
|
||||
flex: 0 0 auto;
|
||||
@@ -1519,7 +1519,7 @@ progress {
|
||||
}
|
||||
@media (min-width: 1200px) {
|
||||
.col-xl {
|
||||
flex: 1 0 0%;
|
||||
flex: 1 0 0;
|
||||
}
|
||||
.row-cols-xl-auto > * {
|
||||
flex: 0 0 auto;
|
||||
@@ -1688,7 +1688,7 @@ progress {
|
||||
}
|
||||
@media (min-width: 1400px) {
|
||||
.col-xxl {
|
||||
flex: 1 0 0%;
|
||||
flex: 1 0 0;
|
||||
}
|
||||
.row-cols-xxl-auto > * {
|
||||
flex: 0 0 auto;
|
||||
@@ -2156,10 +2156,6 @@ progress {
|
||||
display: block;
|
||||
padding: 0;
|
||||
}
|
||||
.form-control::-moz-placeholder {
|
||||
color: var(--bs-secondary-color);
|
||||
opacity: 1;
|
||||
}
|
||||
.form-control::placeholder {
|
||||
color: var(--bs-secondary-color);
|
||||
opacity: 1;
|
||||
@@ -2607,9 +2603,11 @@ textarea.form-control-lg {
|
||||
top: 0;
|
||||
left: 0;
|
||||
z-index: 2;
|
||||
max-width: 100%;
|
||||
height: 100%;
|
||||
padding: 1rem 0.75rem;
|
||||
overflow: hidden;
|
||||
color: rgba(var(--bs-body-color-rgb), 0.65);
|
||||
text-align: start;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
@@ -2627,17 +2625,10 @@ textarea.form-control-lg {
|
||||
.form-floating > .form-control-plaintext {
|
||||
padding: 1rem 0.75rem;
|
||||
}
|
||||
.form-floating > .form-control::-moz-placeholder, .form-floating > .form-control-plaintext::-moz-placeholder {
|
||||
color: transparent;
|
||||
}
|
||||
.form-floating > .form-control::placeholder,
|
||||
.form-floating > .form-control-plaintext::placeholder {
|
||||
color: transparent;
|
||||
}
|
||||
.form-floating > .form-control:not(:-moz-placeholder-shown), .form-floating > .form-control-plaintext:not(:-moz-placeholder-shown) {
|
||||
padding-top: 1.625rem;
|
||||
padding-bottom: 0.625rem;
|
||||
}
|
||||
.form-floating > .form-control:focus, .form-floating > .form-control:not(:placeholder-shown),
|
||||
.form-floating > .form-control-plaintext:focus,
|
||||
.form-floating > .form-control-plaintext:not(:placeholder-shown) {
|
||||
@@ -2652,43 +2643,30 @@ textarea.form-control-lg {
|
||||
.form-floating > .form-select {
|
||||
padding-top: 1.625rem;
|
||||
padding-bottom: 0.625rem;
|
||||
}
|
||||
.form-floating > .form-control:not(:-moz-placeholder-shown) ~ label {
|
||||
color: rgba(var(--bs-body-color-rgb), 0.65);
|
||||
transform: scale(0.85) translateY(-0.5rem) translateX(0.15rem);
|
||||
padding-left: 0.75rem;
|
||||
}
|
||||
.form-floating > .form-control:focus ~ label,
|
||||
.form-floating > .form-control:not(:placeholder-shown) ~ label,
|
||||
.form-floating > .form-control-plaintext ~ label,
|
||||
.form-floating > .form-select ~ label {
|
||||
color: rgba(var(--bs-body-color-rgb), 0.65);
|
||||
transform: scale(0.85) translateY(-0.5rem) translateX(0.15rem);
|
||||
}
|
||||
.form-floating > .form-control:not(:-moz-placeholder-shown) ~ label::after {
|
||||
position: absolute;
|
||||
inset: 1rem 0.375rem;
|
||||
z-index: -1;
|
||||
height: 1.5em;
|
||||
content: "";
|
||||
background-color: var(--bs-body-bg);
|
||||
border-radius: var(--bs-border-radius);
|
||||
}
|
||||
.form-floating > .form-control:focus ~ label::after,
|
||||
.form-floating > .form-control:not(:placeholder-shown) ~ label::after,
|
||||
.form-floating > .form-control-plaintext ~ label::after,
|
||||
.form-floating > .form-select ~ label::after {
|
||||
position: absolute;
|
||||
inset: 1rem 0.375rem;
|
||||
z-index: -1;
|
||||
height: 1.5em;
|
||||
content: "";
|
||||
background-color: var(--bs-body-bg);
|
||||
border-radius: var(--bs-border-radius);
|
||||
}
|
||||
.form-floating > .form-control:-webkit-autofill ~ label {
|
||||
color: rgba(var(--bs-body-color-rgb), 0.65);
|
||||
transform: scale(0.85) translateY(-0.5rem) translateX(0.15rem);
|
||||
}
|
||||
.form-floating > textarea:focus ~ label::after,
|
||||
.form-floating > textarea:not(:placeholder-shown) ~ label::after {
|
||||
position: absolute;
|
||||
inset: 1rem 0.375rem;
|
||||
z-index: -1;
|
||||
height: 1.5em;
|
||||
content: "";
|
||||
background-color: var(--bs-body-bg);
|
||||
border-radius: var(--bs-border-radius);
|
||||
}
|
||||
.form-floating > textarea:disabled ~ label::after {
|
||||
background-color: var(--bs-secondary-bg);
|
||||
}
|
||||
.form-floating > .form-control-plaintext ~ label {
|
||||
border-width: var(--bs-border-width) 0;
|
||||
}
|
||||
@@ -2696,10 +2674,6 @@ textarea.form-control-lg {
.form-floating > .form-control:disabled ~ label {
  color: #6c757d;
}
.form-floating > :disabled ~ label::after,
.form-floating > .form-control:disabled ~ label::after {
  background-color: var(--bs-secondary-bg);
}

.input-group {
  position: relative;
@@ -2782,7 +2756,7 @@ textarea.form-control-lg {
  border-bottom-right-radius: 0;
}
.input-group > :not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback) {
  margin-left: calc(var(--bs-border-width) * -1);
  margin-left: calc(-1 * var(--bs-border-width));
  border-top-left-radius: 0;
  border-bottom-left-radius: 0;
}
@@ -2824,7 +2798,7 @@ textarea.form-control-lg {
.was-validated .form-control:valid, .form-control.is-valid {
  border-color: var(--bs-form-valid-border-color);
  padding-right: calc(1.5em + 0.75rem);
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1'/%3e%3c/svg%3e");
  background-repeat: no-repeat;
  background-position: right calc(0.375em + 0.1875rem) center;
  background-size: calc(0.75em + 0.375rem) calc(0.75em + 0.375rem);
@@ -2843,7 +2817,7 @@ textarea.form-control-lg {
  border-color: var(--bs-form-valid-border-color);
}
.was-validated .form-select:valid:not([multiple]):not([size]), .was-validated .form-select:valid:not([multiple])[size="1"], .form-select.is-valid:not([multiple]):not([size]), .form-select.is-valid:not([multiple])[size="1"] {
  --bs-form-select-bg-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");
  --bs-form-select-bg-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1'/%3e%3c/svg%3e");
  padding-right: 4.125rem;
  background-position: right 0.75rem center, center right 2.25rem;
  background-size: 16px 12px, calc(0.75em + 0.375rem) calc(0.75em + 0.375rem);
@@ -3755,7 +3729,7 @@ textarea.form-control-lg {
}
.btn-group > :not(.btn-check:first-child) + .btn,
.btn-group > .btn-group:not(:first-child) {
  margin-left: calc(var(--bs-border-width) * -1);
  margin-left: calc(-1 * var(--bs-border-width));
}
.btn-group > .btn:not(:last-child):not(.dropdown-toggle),
.btn-group > .btn.dropdown-toggle-split:first-child,
@@ -3802,14 +3776,15 @@ textarea.form-control-lg {
}
.btn-group-vertical > .btn:not(:first-child),
.btn-group-vertical > .btn-group:not(:first-child) {
  margin-top: calc(var(--bs-border-width) * -1);
  margin-top: calc(-1 * var(--bs-border-width));
}
.btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle),
.btn-group-vertical > .btn-group:not(:last-child) > .btn {
  border-bottom-right-radius: 0;
  border-bottom-left-radius: 0;
}
.btn-group-vertical > .btn ~ .btn,
.btn-group-vertical > .btn:nth-child(n+3),
.btn-group-vertical > :not(.btn-check) + .btn,
.btn-group-vertical > .btn-group:not(:first-child) > .btn {
  border-top-left-radius: 0;
  border-top-right-radius: 0;
@@ -3933,8 +3908,8 @@ textarea.form-control-lg {

.nav-justified > .nav-link,
.nav-justified .nav-item {
  flex-basis: 0;
  flex-grow: 1;
  flex-basis: 0;
  text-align: center;
}

@@ -4035,8 +4010,8 @@ textarea.form-control-lg {
}

.navbar-collapse {
  flex-basis: 100%;
  flex-grow: 1;
  flex-basis: 100%;
  align-items: center;
}

@@ -4531,7 +4506,7 @@ textarea.form-control-lg {
  flex-flow: row wrap;
}
.card-group > .card {
  flex: 1 0 0%;
  flex: 1 0 0;
  margin-bottom: 0;
}
.card-group > .card + .card {
@@ -4542,24 +4517,24 @@ textarea.form-control-lg {
  border-top-right-radius: 0;
  border-bottom-right-radius: 0;
}
.card-group > .card:not(:last-child) .card-img-top,
.card-group > .card:not(:last-child) .card-header {
.card-group > .card:not(:last-child) > .card-img-top,
.card-group > .card:not(:last-child) > .card-header {
  border-top-right-radius: 0;
}
.card-group > .card:not(:last-child) .card-img-bottom,
.card-group > .card:not(:last-child) .card-footer {
.card-group > .card:not(:last-child) > .card-img-bottom,
.card-group > .card:not(:last-child) > .card-footer {
  border-bottom-right-radius: 0;
}
.card-group > .card:not(:first-child) {
  border-top-left-radius: 0;
  border-bottom-left-radius: 0;
}
.card-group > .card:not(:first-child) .card-img-top,
.card-group > .card:not(:first-child) .card-header {
.card-group > .card:not(:first-child) > .card-img-top,
.card-group > .card:not(:first-child) > .card-header {
  border-top-left-radius: 0;
}
.card-group > .card:not(:first-child) .card-img-bottom,
.card-group > .card:not(:first-child) .card-footer {
.card-group > .card:not(:first-child) > .card-img-bottom,
.card-group > .card:not(:first-child) > .card-footer {
  border-bottom-left-radius: 0;
}
}
@@ -4576,11 +4551,11 @@ textarea.form-control-lg {
  --bs-accordion-btn-padding-y: 1rem;
  --bs-accordion-btn-color: var(--bs-body-color);
  --bs-accordion-btn-bg: var(--bs-accordion-bg);
  --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23212529' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e");
  --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23212529' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='m2 5 6 6 6-6'/%3e%3c/svg%3e");
  --bs-accordion-btn-icon-width: 1.25rem;
  --bs-accordion-btn-icon-transform: rotate(-180deg);
  --bs-accordion-btn-icon-transition: transform 0.2s ease-in-out;
  --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23052c65' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e");
  --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23052c65' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='m2 5 6 6 6-6'/%3e%3c/svg%3e");
  --bs-accordion-btn-focus-box-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25);
  --bs-accordion-body-padding-x: 1.25rem;
  --bs-accordion-body-padding-y: 1rem;
@@ -4690,16 +4665,15 @@ textarea.form-control-lg {
.accordion-flush > .accordion-item:last-child {
  border-bottom: 0;
}
.accordion-flush > .accordion-item > .accordion-header .accordion-button, .accordion-flush > .accordion-item > .accordion-header .accordion-button.collapsed {
  border-radius: 0;
}
.accordion-flush > .accordion-item > .accordion-collapse {
.accordion-flush > .accordion-item > .accordion-collapse,
.accordion-flush > .accordion-item > .accordion-header .accordion-button,
.accordion-flush > .accordion-item > .accordion-header .accordion-button.collapsed {
  border-radius: 0;
}

[data-bs-theme=dark] .accordion-button::after {
  --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");
  --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");
  --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708'/%3e%3c/svg%3e");
  --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708'/%3e%3c/svg%3e");
}

.breadcrumb {
@@ -4803,7 +4777,7 @@ textarea.form-control-lg {
}

.page-item:not(:first-child) .page-link {
  margin-left: calc(var(--bs-border-width) * -1);
  margin-left: calc(-1 * var(--bs-border-width));
}
.page-item:first-child .page-link {
  border-top-left-radius: var(--bs-pagination-border-radius);
@@ -4952,7 +4926,7 @@ textarea.form-control-lg {

@keyframes progress-bar-stripes {
  0% {
    background-position-x: 1rem;
    background-position-x: var(--bs-progress-height);
  }
}
.progress,
@@ -5046,22 +5020,6 @@ textarea.form-control-lg {
  counter-increment: section;
}

.list-group-item-action {
  width: 100%;
  color: var(--bs-list-group-action-color);
  text-align: inherit;
}
.list-group-item-action:hover, .list-group-item-action:focus {
  z-index: 1;
  color: var(--bs-list-group-action-hover-color);
  text-decoration: none;
  background-color: var(--bs-list-group-action-hover-bg);
}
.list-group-item-action:active {
  color: var(--bs-list-group-action-active-color);
  background-color: var(--bs-list-group-action-active-bg);
}

.list-group-item {
  position: relative;
  display: block;
@@ -5098,6 +5056,22 @@ textarea.form-control-lg {
  border-top-width: var(--bs-list-group-border-width);
}

.list-group-item-action {
  width: 100%;
  color: var(--bs-list-group-action-color);
  text-align: inherit;
}
.list-group-item-action:not(.active):hover, .list-group-item-action:not(.active):focus {
  z-index: 1;
  color: var(--bs-list-group-action-hover-color);
  text-decoration: none;
  background-color: var(--bs-list-group-action-hover-bg);
}
.list-group-item-action:not(.active):active {
  color: var(--bs-list-group-action-active-color);
  background-color: var(--bs-list-group-action-active-bg);
}

.list-group-horizontal {
  flex-direction: row;
}
@@ -5357,19 +5331,19 @@ textarea.form-control-lg {

.btn-close {
  --bs-btn-close-color: #000;
  --bs-btn-close-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 0-1.414z'/%3e%3c/svg%3e");
  --bs-btn-close-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 0-1.414'/%3e%3c/svg%3e");
  --bs-btn-close-opacity: 0.5;
  --bs-btn-close-hover-opacity: 0.75;
  --bs-btn-close-focus-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25);
  --bs-btn-close-focus-opacity: 1;
  --bs-btn-close-disabled-opacity: 0.25;
  --bs-btn-close-white-filter: invert(1) grayscale(100%) brightness(200%);
  box-sizing: content-box;
  width: 1em;
  height: 1em;
  padding: 0.25em 0.25em;
  color: var(--bs-btn-close-color);
  background: transparent var(--bs-btn-close-bg) center/1em auto no-repeat;
  filter: var(--bs-btn-close-filter);
  border: 0;
  border-radius: 0.375rem;
  opacity: var(--bs-btn-close-opacity);
@@ -5393,11 +5367,16 @@ textarea.form-control-lg {
}

.btn-close-white {
  filter: var(--bs-btn-close-white-filter);
  --bs-btn-close-filter: invert(1) grayscale(100%) brightness(200%);
}

[data-bs-theme=dark] .btn-close {
  filter: var(--bs-btn-close-white-filter);
:root,
[data-bs-theme=light] {
  --bs-btn-close-filter: ;
}

[data-bs-theme=dark] {
  --bs-btn-close-filter: invert(1) grayscale(100%) brightness(200%);
}

.toast {
@@ -5474,7 +5453,7 @@ textarea.form-control-lg {
  --bs-modal-width: 500px;
  --bs-modal-padding: 1rem;
  --bs-modal-margin: 0.5rem;
  --bs-modal-color: ;
  --bs-modal-color: var(--bs-body-color);
  --bs-modal-bg: var(--bs-body-bg);
  --bs-modal-border-color: var(--bs-border-color-translucent);
  --bs-modal-border-width: var(--bs-border-width);
@@ -5510,8 +5489,8 @@ textarea.form-control-lg {
  pointer-events: none;
}
.modal.fade .modal-dialog {
  transition: transform 0.3s ease-out;
  transform: translate(0, -50px);
  transition: transform 0.3s ease-out;
}
@media (prefers-reduced-motion: reduce) {
  .modal.fade .modal-dialog {
@@ -5586,7 +5565,10 @@ textarea.form-control-lg {
}
.modal-header .btn-close {
  padding: calc(var(--bs-modal-header-padding-y) * 0.5) calc(var(--bs-modal-header-padding-x) * 0.5);
  margin: calc(-0.5 * var(--bs-modal-header-padding-y)) calc(-0.5 * var(--bs-modal-header-padding-x)) calc(-0.5 * var(--bs-modal-header-padding-y)) auto;
  margin-top: calc(-0.5 * var(--bs-modal-header-padding-y));
  margin-right: calc(-0.5 * var(--bs-modal-header-padding-x));
  margin-bottom: calc(-0.5 * var(--bs-modal-header-padding-y));
  margin-left: auto;
}

.modal-title {
@@ -6107,6 +6089,7 @@ textarea.form-control-lg {
  color: #fff;
  text-align: center;
  background: none;
  filter: var(--bs-carousel-control-icon-filter);
  border: 0;
  opacity: 0.5;
  transition: opacity 0.15s ease;
@@ -6145,11 +6128,11 @@ textarea.form-control-lg {
}

.carousel-control-prev-icon {
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")*/;
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708'/%3e%3c/svg%3e")*/;
}

.carousel-control-next-icon {
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")*/;
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0'/%3e%3c/svg%3e")*/;
}

.carousel-indicators {
@@ -6175,7 +6158,7 @@ textarea.form-control-lg {
  margin-left: 3px;
  text-indent: -999px;
  cursor: pointer;
  background-color: #fff;
  background-color: var(--bs-carousel-indicator-active-bg);
  background-clip: padding-box;
  border: 0;
  border-top: 10px solid transparent;
@@ -6199,31 +6182,27 @@ textarea.form-control-lg {
  left: 15%;
  padding-top: 1.25rem;
  padding-bottom: 1.25rem;
  color: #fff;
  color: var(--bs-carousel-caption-color);
  text-align: center;
}

.carousel-dark .carousel-control-prev-icon,
.carousel-dark .carousel-control-next-icon {
  filter: invert(1) grayscale(100);
}
.carousel-dark .carousel-indicators [data-bs-target] {
  background-color: #000;
}
.carousel-dark .carousel-caption {
  color: #000;
.carousel-dark {
  --bs-carousel-indicator-active-bg: #000;
  --bs-carousel-caption-color: #000;
  --bs-carousel-control-icon-filter: invert(1) grayscale(100);
}

[data-bs-theme=dark] .carousel .carousel-control-prev-icon,
[data-bs-theme=dark] .carousel .carousel-control-next-icon, [data-bs-theme=dark].carousel .carousel-control-prev-icon,
[data-bs-theme=dark].carousel .carousel-control-next-icon {
  filter: invert(1) grayscale(100);
:root,
[data-bs-theme=light] {
  --bs-carousel-indicator-active-bg: #fff;
  --bs-carousel-caption-color: #fff;
  --bs-carousel-control-icon-filter: ;
}
[data-bs-theme=dark] .carousel .carousel-indicators [data-bs-target], [data-bs-theme=dark].carousel .carousel-indicators [data-bs-target] {
  background-color: #000;
}
[data-bs-theme=dark] .carousel .carousel-caption, [data-bs-theme=dark].carousel .carousel-caption {
  color: #000;

[data-bs-theme=dark] {
  --bs-carousel-indicator-active-bg: #000;
  --bs-carousel-caption-color: #000;
  --bs-carousel-control-icon-filter: invert(1) grayscale(100);
}

.spinner-grow,
@@ -6773,7 +6752,10 @@ textarea.form-control-lg {
}
.offcanvas-header .btn-close {
  padding: calc(var(--bs-offcanvas-padding-y) * 0.5) calc(var(--bs-offcanvas-padding-x) * 0.5);
  margin: calc(-0.5 * var(--bs-offcanvas-padding-y)) calc(-0.5 * var(--bs-offcanvas-padding-x)) calc(-0.5 * var(--bs-offcanvas-padding-y)) auto;
  margin-top: calc(-0.5 * var(--bs-offcanvas-padding-y));
  margin-right: calc(-0.5 * var(--bs-offcanvas-padding-x));
  margin-bottom: calc(-0.5 * var(--bs-offcanvas-padding-y));
  margin-left: auto;
}

.offcanvas-title {
@@ -7174,6 +7156,10 @@ textarea.form-control-lg {
.visually-hidden-focusable:not(:focus):not(:focus-within):not(caption) {
  position: absolute !important;
}
.visually-hidden *,
.visually-hidden-focusable:not(:focus):not(:focus-within) * {
  overflow: hidden !important;
}

.stretched-link::after {
  position: absolute;
154 src/static/scripts/datatables.css vendored
@@ -4,13 +4,12 @@
 *
 * To rebuild or modify this file with the latest versions of the included
 * software please visit:
 * https://datatables.net/download/#bs5/dt-2.1.8
 * https://datatables.net/download/#bs5/dt-2.3.2
 *
 * Included libraries:
 * DataTables 2.1.8
 * DataTables 2.3.2
 */

@charset "UTF-8";
:root {
  --dt-row-selected: 13, 110, 253;
  --dt-row-selected-text: 255, 255, 255;
@@ -18,17 +17,18 @@
  --dt-row-stripe: 0, 0, 0;
  --dt-row-hover: 0, 0, 0;
  --dt-column-ordering: 0, 0, 0;
  --dt-header-align-items: center;
  --dt-html-background: white;
}
:root.dark {
  --dt-html-background: rgb(33, 37, 41);
}

table.dataTable td.dt-control {
table.dataTable tbody td.dt-control {
  text-align: center;
  cursor: pointer;
}
table.dataTable td.dt-control:before {
table.dataTable tbody td.dt-control:before {
  display: inline-block;
  box-sizing: border-box;
  content: "";
@@ -37,12 +37,15 @@ table.dataTable td.dt-control:before {
  border-bottom: 5px solid transparent;
  border-right: 0px solid transparent;
}
table.dataTable tr.dt-hasChild td.dt-control:before {
table.dataTable tbody tr.dt-hasChild td.dt-control:before {
  border-top: 10px solid rgba(0, 0, 0, 0.5);
  border-left: 5px solid transparent;
  border-bottom: 0px solid transparent;
  border-right: 5px solid transparent;
}
table.dataTable tfoot:empty {
  display: none;
}

html.dark table.dataTable td.dt-control:before,
:root[data-bs-theme=dark] table.dataTable td.dt-control:before,
@@ -90,8 +93,8 @@ table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before {
  position: absolute;
  display: block;
  bottom: 50%;
  content: "▲";
  content: "▲"/"";
  content: "\25B2";
  content: "\25B2"/"";
}
table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
@@ -99,27 +102,17 @@ table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
  position: absolute;
  display: block;
  top: 50%;
  content: "▼";
  content: "▼"/"";
}
table.dataTable thead > tr > th.dt-orderable-asc, table.dataTable thead > tr > th.dt-orderable-desc, table.dataTable thead > tr > th.dt-ordering-asc, table.dataTable thead > tr > th.dt-ordering-desc,
table.dataTable thead > tr > td.dt-orderable-asc,
table.dataTable thead > tr > td.dt-orderable-desc,
table.dataTable thead > tr > td.dt-ordering-asc,
table.dataTable thead > tr > td.dt-ordering-desc {
  position: relative;
  padding-right: 30px;
  content: "\25BC";
  content: "\25BC"/"";
}
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order {
  position: absolute;
  right: 12px;
  top: 0;
  bottom: 0;
  position: relative;
  width: 12px;
  height: 20px;
}
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
@@ -161,6 +154,40 @@ table.dataTable thead > tr > td:active {
  outline: none;
}

table.dataTable thead > tr > th div.dt-column-header,
table.dataTable thead > tr > th div.dt-column-footer,
table.dataTable thead > tr > td div.dt-column-header,
table.dataTable thead > tr > td div.dt-column-footer,
table.dataTable tfoot > tr > th div.dt-column-header,
table.dataTable tfoot > tr > th div.dt-column-footer,
table.dataTable tfoot > tr > td div.dt-column-header,
table.dataTable tfoot > tr > td div.dt-column-footer {
  display: flex;
  justify-content: space-between;
  align-items: var(--dt-header-align-items);
  gap: 4px;
}
table.dataTable thead > tr > th div.dt-column-header span.dt-column-title,
table.dataTable thead > tr > th div.dt-column-footer span.dt-column-title,
table.dataTable thead > tr > td div.dt-column-header span.dt-column-title,
table.dataTable thead > tr > td div.dt-column-footer span.dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-header span.dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-footer span.dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-header span.dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-footer span.dt-column-title {
  flex-grow: 1;
}
table.dataTable thead > tr > th div.dt-column-header span.dt-column-title:empty,
table.dataTable thead > tr > th div.dt-column-footer span.dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-header span.dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-footer span.dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-header span.dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-footer span.dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-header span.dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-footer span.dt-column-title:empty {
  display: none;
}

div.dt-scroll-body > table.dataTable > thead > tr > th,
div.dt-scroll-body > table.dataTable > thead > tr > td {
  overflow: hidden;
@@ -251,10 +278,30 @@ table.dataTable th,
table.dataTable td {
  box-sizing: border-box;
}
table.dataTable th.dt-type-numeric, table.dataTable th.dt-type-date,
table.dataTable td.dt-type-numeric,
table.dataTable td.dt-type-date {
  text-align: right;
}
table.dataTable th.dt-type-numeric div.dt-column-header,
table.dataTable th.dt-type-numeric div.dt-column-footer, table.dataTable th.dt-type-date div.dt-column-header,
table.dataTable th.dt-type-date div.dt-column-footer,
table.dataTable td.dt-type-numeric div.dt-column-header,
table.dataTable td.dt-type-numeric div.dt-column-footer,
table.dataTable td.dt-type-date div.dt-column-header,
table.dataTable td.dt-type-date div.dt-column-footer {
  flex-direction: row-reverse;
}
table.dataTable th.dt-left,
table.dataTable td.dt-left {
  text-align: left;
}
table.dataTable th.dt-left div.dt-column-header,
table.dataTable th.dt-left div.dt-column-footer,
table.dataTable td.dt-left div.dt-column-header,
table.dataTable td.dt-left div.dt-column-footer {
  flex-direction: row;
}
table.dataTable th.dt-center,
table.dataTable td.dt-center {
  text-align: center;
@@ -263,10 +310,22 @@ table.dataTable th.dt-right,
table.dataTable td.dt-right {
  text-align: right;
}
table.dataTable th.dt-right div.dt-column-header,
table.dataTable th.dt-right div.dt-column-footer,
table.dataTable td.dt-right div.dt-column-header,
table.dataTable td.dt-right div.dt-column-footer {
  flex-direction: row-reverse;
}
table.dataTable th.dt-justify,
table.dataTable td.dt-justify {
  text-align: justify;
}
table.dataTable th.dt-justify div.dt-column-header,
table.dataTable th.dt-justify div.dt-column-footer,
table.dataTable td.dt-justify div.dt-column-header,
table.dataTable td.dt-justify div.dt-column-footer {
  flex-direction: row;
}
table.dataTable th.dt-nowrap,
table.dataTable td.dt-nowrap {
  white-space: nowrap;
@@ -276,11 +335,6 @@ table.dataTable td.dt-empty {
  text-align: center;
  vertical-align: top;
}
table.dataTable th.dt-type-numeric, table.dataTable th.dt-type-date,
table.dataTable td.dt-type-numeric,
table.dataTable td.dt-type-date {
  text-align: right;
}
table.dataTable thead th,
table.dataTable thead td,
table.dataTable tfoot th,
@@ -293,6 +347,16 @@ table.dataTable tfoot th.dt-head-left,
table.dataTable tfoot td.dt-head-left {
  text-align: left;
}
table.dataTable thead th.dt-head-left div.dt-column-header,
table.dataTable thead th.dt-head-left div.dt-column-footer,
table.dataTable thead td.dt-head-left div.dt-column-header,
table.dataTable thead td.dt-head-left div.dt-column-footer,
table.dataTable tfoot th.dt-head-left div.dt-column-header,
table.dataTable tfoot th.dt-head-left div.dt-column-footer,
table.dataTable tfoot td.dt-head-left div.dt-column-header,
table.dataTable tfoot td.dt-head-left div.dt-column-footer {
  flex-direction: row;
}
table.dataTable thead th.dt-head-center,
table.dataTable thead td.dt-head-center,
table.dataTable tfoot th.dt-head-center,
@@ -305,12 +369,32 @@ table.dataTable tfoot th.dt-head-right,
table.dataTable tfoot td.dt-head-right {
  text-align: right;
}
table.dataTable thead th.dt-head-right div.dt-column-header,
table.dataTable thead th.dt-head-right div.dt-column-footer,
table.dataTable thead td.dt-head-right div.dt-column-header,
table.dataTable thead td.dt-head-right div.dt-column-footer,
table.dataTable tfoot th.dt-head-right div.dt-column-header,
table.dataTable tfoot th.dt-head-right div.dt-column-footer,
table.dataTable tfoot td.dt-head-right div.dt-column-header,
table.dataTable tfoot td.dt-head-right div.dt-column-footer {
  flex-direction: row-reverse;
}
table.dataTable thead th.dt-head-justify,
table.dataTable thead td.dt-head-justify,
table.dataTable tfoot th.dt-head-justify,
table.dataTable tfoot td.dt-head-justify {
  text-align: justify;
}
table.dataTable thead th.dt-head-justify div.dt-column-header,
table.dataTable thead th.dt-head-justify div.dt-column-footer,
table.dataTable thead td.dt-head-justify div.dt-column-header,
table.dataTable thead td.dt-head-justify div.dt-column-footer,
table.dataTable tfoot th.dt-head-justify div.dt-column-header,
table.dataTable tfoot th.dt-head-justify div.dt-column-footer,
table.dataTable tfoot td.dt-head-justify div.dt-column-header,
table.dataTable tfoot td.dt-head-justify div.dt-column-footer {
  flex-direction: row;
}
table.dataTable thead th.dt-head-nowrap,
table.dataTable thead td.dt-head-nowrap,
table.dataTable tfoot th.dt-head-nowrap,
@@ -338,6 +422,10 @@ table.dataTable tbody td.dt-body-nowrap {
  white-space: nowrap;
}

:root {
  --dt-header-align-items: flex-end;
}

/*! Bootstrap 5 integration for DataTables
 *
 * ©2020 SpryMedia Ltd, all rights reserved.
@@ -408,6 +496,9 @@ div.dt-container div.dt-layout-table > div {
  margin-left: 0;
}
}
div.dt-container {
  position: relative;
}
div.dt-container div.dt-length label {
  font-weight: normal;
  text-align: left;
@@ -496,14 +587,19 @@ table.dataTable.table-sm > thead > tr td.dt-orderable-asc,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc {
  padding-right: 20px;
  padding-right: 0.25rem;
}
table.dataTable.table-sm > thead > tr th.dt-orderable-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc span.dt-column-order {
  right: 5px;
  right: 0.25rem;
}
table.dataTable.table-sm > thead > tr th.dt-type-date span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-type-numeric span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-date span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-numeric span.dt-column-order {
  left: 0.25rem;
}

div.dt-scroll-head table.table-bordered {
911 src/static/scripts/datatables.js vendored
File diff suppressed because it is too large
@@ -7,35 +7,34 @@
<div class="col-md">
  <dl class="row">
    <dt class="col-sm-5">Server Installed
      <span class="badge bg-success d-none" id="server-success" title="Latest version is installed.">Ok</span>
      <span class="badge bg-warning text-dark d-none" id="server-warning" title="There seems to be an update available.">Update</span>
      <span class="badge bg-info text-dark d-none" id="server-branch" title="This is a branched version.">Branched</span>
      <span class="badge bg-success d-none abbr-badge" id="server-success" title="Latest version is installed.">Ok</span>
      <span class="badge bg-warning text-dark d-none abbr-badge" id="server-warning" title="There seems to be an update available.">Update</span>
      <span class="badge bg-info text-dark d-none abbr-badge" id="server-branch" title="This is a branched version.">Branched</span>
    </dt>
    <dd class="col-sm-7">
      <span id="server-installed">{{page_data.current_release}}</span>
    </dd>
    <dt class="col-sm-5">Server Latest
      <span class="badge bg-secondary d-none" id="server-failed" title="Unable to determine latest version.">Unknown</span>
      <span class="badge bg-secondary d-none abbr-badge" id="server-failed" title="Unable to determine latest version.">Unknown</span>
    </dt>
    <dd class="col-sm-7">
      <span id="server-latest">{{page_data.latest_release}}<span id="server-latest-commit" class="d-none">-{{page_data.latest_commit}}</span></span>
    </dd>
    {{#if page_data.web_vault_enabled}}
    <dt class="col-sm-5">Web Installed
      <span class="badge bg-success d-none" id="web-success" title="Latest version is installed.">Ok</span>
      <span class="badge bg-warning text-dark d-none" id="web-warning" title="There seems to be an update available.">Update</span>
      <span class="badge bg-success d-none abbr-badge" id="web-success" title="Latest version is installed.">Ok</span>
      <span class="badge bg-warning text-dark d-none abbr-badge" id="web-warning" title="There seems to be an update available.">Update</span>
      <span class="badge bg-info text-dark d-none abbr-badge" id="web-prerelease" title="You seem to be using a pre-release version.">Pre-Release</span>
    </dt>
    <dd class="col-sm-7">
      <span id="web-installed">{{page_data.web_vault_version}}</span>
    </dd>
    {{#unless page_data.running_within_container}}
    <dt class="col-sm-5">Web Latest
      <span class="badge bg-secondary d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
      <span class="badge bg-secondary d-none abbr-badge" id="web-failed" title="Unable to determine latest version.">Unknown</span>
    </dt>
    <dd class="col-sm-7">
      <span id="web-latest">{{page_data.latest_web_build}}</span>
    </dd>
    {{/unless}}
    {{/if}}
    {{#unless page_data.web_vault_enabled}}
    <dt class="col-sm-5">Web Installed</dt>
@@ -68,10 +67,11 @@
    <span class="d-block"><b>No</b></span>
    {{/unless}}
    </dd>
    <dt class="col-sm-5">Environment settings overridden</dt>
    <dt class="col-sm-5">Uses config.json</dt>
    <dd class="col-sm-7">
      {{#if page_data.overrides}}
      <span class="d-block" title="The following settings are overridden: {{page_data.overrides}}"><b>Yes</b></span>
      <span class="d-inline"><b>Yes</b></span>
      <span class="badge bg-info text-dark abbr-badge" title="Environment variables are overwritten by a config.json.
{{page_data.overrides}}">Details</span>
      {{/if}}
      {{#unless page_data.overrides}}
      <span class="d-block"><b>No</b></span>
@@ -90,10 +90,10 @@
    {{#if page_data.ip_header_exists}}
    <dt class="col-sm-5">IP header
      {{#if page_data.ip_header_match}}
      <span class="badge bg-success" title="IP_HEADER config seems to be valid.">Match</span>
      <span class="badge bg-success abbr-badge" title="IP_HEADER config seems to be valid.">Match</span>
      {{/if}}
      {{#unless page_data.ip_header_match}}
      <span class="badge bg-danger" title="IP_HEADER config seems to be invalid. IP's in the log could be invalid. Please fix.">No Match</span>
      <span class="badge bg-danger abbr-badge" title="IP_HEADER config seems to be invalid. IP's in the log could be invalid. Please fix.">No Match</span>
      {{/unless}}
    </dt>
    <dd class="col-sm-7">
@@ -109,10 +109,10 @@
    {{!-- End if IP Header Exists --}}
    <dt class="col-sm-5">Internet access
      {{#if page_data.has_http_access}}
      <span class="badge bg-success" title="We have internet access!">Ok</span>
      <span class="badge bg-success abbr-badge" title="We have internet access!">Ok</span>
      {{/if}}
      {{#unless page_data.has_http_access}}
      <span class="badge bg-danger" title="There seems to be no internet access. Please fix.">Error</span>
      <span class="badge bg-danger abbr-badge" title="There seems to be no internet access. Please fix.">Error</span>
      {{/unless}}
    </dt>
    <dd class="col-sm-7">
@@ -134,8 +134,8 @@
    </dd>
    <dt class="col-sm-5">Websocket enabled
      {{#if page_data.enable_websocket}}
      <span class="badge bg-success d-none" id="websocket-success" title="Websocket connection is working.">Ok</span>
      <span class="badge bg-danger d-none" id="websocket-error" title="Websocket connection error, validate your reverse proxy configuration!">Error</span>
      <span class="badge bg-success d-none abbr-badge" id="websocket-success" title="Websocket connection is working.">Ok</span>
      <span class="badge bg-danger d-none abbr-badge" id="websocket-error" title="Websocket connection error, validate your reverse proxy configuration!">Error</span>
      {{/if}}
    </dt>
    <dd class="col-sm-7">
@@ -148,23 +148,27 @@
    </dd>

    <dt class="col-sm-5">DNS (github.com)
      <span class="badge bg-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span>
      <span class="badge bg-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
      <span class="badge bg-success d-none abbr-badge" id="dns-success" title="DNS Resolving works!">Ok</span>
      <span class="badge bg-danger d-none abbr-badge" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
    </dt>
    <dd class="col-sm-7">
      <span id="dns-resolved">{{page_data.dns_resolved}}</span>
    </dd>
    <dt class="col-sm-5">Date &amp; Time (Local)</dt>
    <dt class="col-sm-5">Date &amp; Time (Local)
      {{#if page_data.tz_env}}
      <span class="badge bg-success abbr-badge" title="Configured TZ environment variable">{{page_data.tz_env}}</span>
      {{/if}}
    </dt>
    <dd class="col-sm-7">
      <span><b>Server:</b> {{page_data.server_time_local}}</span>
    </dd>
    <dt class="col-sm-5">Date &amp; Time (UTC)
      <span class="badge bg-success d-none" id="time-success" title="Server and browser times are within 15 seconds of each other.">Server/Browser Ok</span>
      <span class="badge bg-danger d-none" id="time-warning" title="Server and browser times are more than 15 seconds apart.">Server/Browser Error</span>
      <span class="badge bg-success d-none" id="ntp-server-success" title="Server and NTP times are within 15 seconds of each other.">Server NTP Ok</span>
      <span class="badge bg-danger d-none" id="ntp-server-warning" title="Server and NTP times are more than 15 seconds apart.">Server NTP Error</span>
      <span class="badge bg-success d-none" id="ntp-browser-success" title="Browser and NTP times are within 15 seconds of each other.">Browser NTP Ok</span>
      <span class="badge bg-danger d-none" id="ntp-browser-warning" title="Browser and NTP times are more than 15 seconds apart.">Browser NTP Error</span>
      <span class="badge bg-success d-none abbr-badge" id="time-success" title="Server and browser times are within 15 seconds of each other.">Server/Browser Ok</span>
      <span class="badge bg-danger d-none abbr-badge" id="time-warning" title="Server and browser times are more than 15 seconds apart.">Server/Browser Error</span>
      <span class="badge bg-success d-none abbr-badge" id="ntp-server-success" title="Server and NTP times are within 15 seconds of each other.">Server NTP Ok</span>
      <span class="badge bg-danger d-none abbr-badge" id="ntp-server-warning" title="Server and NTP times are more than 15 seconds apart.">Server NTP Error</span>
      <span class="badge bg-success d-none abbr-badge" id="ntp-browser-success" title="Browser and NTP times are within 15 seconds of each other.">Browser NTP Ok</span>
      <span class="badge bg-danger d-none abbr-badge" id="ntp-browser-warning" title="Browser and NTP times are more than 15 seconds apart.">Browser NTP Error</span>
    </dt>
    <dd class="col-sm-7">
      <span id="ntp-time" class="d-block"><b>NTP:</b> <span id="ntp-server-string">{{page_data.ntp_time}}</span></span>
@@ -173,10 +177,10 @@
    </dd>

    <dt class="col-sm-5">Domain configuration
      <span class="badge bg-success d-none" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
      <span class="badge bg-danger d-none" id="domain-warning" title="The domain variable does not match the browser location.
The domain variable does not seem to be configured correctly.
Some features may not work as expected!">No Match</span>
      <span class="badge bg-success d-none" id="https-success" title="Configured to use HTTPS">HTTPS</span>
      <span class="badge bg-danger d-none" id="https-warning" title="Not configured to use HTTPS.
Some features may not work as expected!">No HTTPS</span>
      <span class="badge bg-success d-none abbr-badge" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
      <span class="badge bg-danger d-none abbr-badge" id="domain-warning" title="The domain variable does not match the browser location.
The domain variable does not seem to be configured correctly.
Some features may not work as expected!">No Match</span>
      <span class="badge bg-success d-none abbr-badge" id="https-success" title="Configured to use HTTPS">HTTPS</span>
      <span class="badge bg-danger d-none abbr-badge" id="https-warning" title="Not configured to use HTTPS.
Some features may not work as expected!">No HTTPS</span>
    </dt>
    <dd class="col-sm-7">
      <span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{page_data.admin_url}}</span></span>
@@ -184,8 +188,8 @@
    </dd>

    <dt class="col-sm-5">HTTP Response validation
      <span class="badge bg-success d-none" id="http-response-success" title="All headers and HTTP request responses seem to be ok.">Ok</span>
      <span class="badge bg-danger d-none" id="http-response-warning" title="Some headers or HTTP request responses return invalid data!">Error</span>
      <span class="badge bg-success d-none abbr-badge" id="http-response-success" title="All headers and HTTP request responses seem to be ok.">Ok</span>
      <span class="badge bg-danger d-none abbr-badge" id="http-response-warning" title="Some headers or HTTP request responses return invalid data!">Error</span>
    </dt>
    <dd class="col-sm-7">
      <span id="http-response-errors" class="d-block"></span>

@@ -43,7 +43,7 @@
  <span class="d-block"><strong>Groups:</strong> {{group_count}}</span>
  <span class="d-block"><strong>Events:</strong> {{event_count}}</span>
</td>
<td class="text-end px-0 small">
<td class="text-end px-1 small">
  <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{id}}" data-vw-org-name="{{name}}" data-vw-billing-email="{{billingEmail}}">Delete Organization</button><br>
</td>
</tr>

@@ -60,7 +60,7 @@
  {{/each}}
  </div>
</td>
<td class="text-end px-0 small">
<td class="text-end px-1 small">
  <span data-vw-user-uuid="{{id}}" data-vw-user-email="{{email}}">
  {{#if twoFactorEnabled}}
  <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-remove2fa>Remove all 2FA</button><br>

6 src/static/templates/email/change_email_existing.hbs Normal file
@@ -0,0 +1,6 @@
Your Email Change
<!---------------->
A user ({{ acting_address }}) recently tried to change their account to use this email address ({{ existing_address }}). An account already exists with this email ({{ existing_address }}).

If you did not try to change an email address, contact your administrator.
{{> email/email_footer_text }}
16 src/static/templates/email/change_email_existing.html.hbs Normal file
@@ -0,0 +1,16 @@
Your Email Change
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      A user ({{ acting_address }}) recently tried to change their account to use this email address ({{ existing_address }}). An account already exists with this email ({{ existing_address }}).
    </td>
  </tr>
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      If you did not try to change an email address, contact your administrator.
    </td>
  </tr>
</table>
{{> email/email_footer }}
8 src/static/templates/email/register_verify_email.hbs Normal file
@@ -0,0 +1,8 @@
Verify Your Email
<!---------------->
Verify this email address to finish creating your account by clicking the link below.

Verify Email Address Now: {{{url}}}

If you did not request to verify your account, you can safely ignore this email.
{{> email/email_footer_text }}
24 src/static/templates/email/register_verify_email.html.hbs Normal file
@@ -0,0 +1,24 @@
Verify Your Email
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      Verify this email address to finish creating your account by clicking the link below.
    </td>
  </tr>
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      <a href="{{{url}}}"
        clicktracking=off target="_blank" style="color: #ffffff; text-decoration: none; text-align: center; cursor: pointer; display: inline-block; border-radius: 5px; background-color: #3c8dbc; border-color: #3c8dbc; border-style: solid; border-width: 10px 20px; margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
        Verify Email Address Now
      </a>
    </td>
  </tr>
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      If you did not request to verify your account, you can safely ignore this email.
    </td>
  </tr>
</table>
{{> email/email_footer }}
@@ -1,7 +1,7 @@
Removed from {{{org_name}}}
Your access to {{{org_name}}} has been revoked.
<!---------------->
You have been removed from organization *{{org_name}}* because your account does not have Two-step Login enabled.
Your user account has been removed from the *{{org_name}}* organization because you do not have two-step login configured.
Before you can re-join this organization you need to set up two-step login on your user account.


You can enable Two-step Login in your account settings.
You can enable two-step login in your account settings.
{{> email/email_footer_text }}

@@ -1,15 +1,16 @@
Removed from {{{org_name}}}
Your access to {{{org_name}}} has been revoked.
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      You have been removed from organization <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{org_name}}</b> because your account does not have Two-step Login enabled.
      Your user account has been removed from the <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{org_name}}</b> organization because you do not have two-step login configured.<br>
      Before you can re-join this organization you need to set up two-step login on your user account.
    </td>
  </tr>
  <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
    <td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
      You can enable Two-step Login in your account settings.
      You can enable two-step login in your account settings.
    </td>
  </tr>
</table>

@@ -20,8 +20,50 @@ a[href$="/settings/sponsored-families"] {
     @extend %vw-hide;
 }
 
+/* Hide the sso `Email` input field */
+.vw-email-sso {
+    @extend %vw-hide;
+}
+
 /* Hide the `Enterprise Single Sign-On` button on the login page */
-a[routerlink="/sso"] {
+{{#if (webver ">=2025.5.1")}}
+.vw-sso-login {
     @extend %vw-hide;
 }
+{{else}}
+app-root ng-component > form > div:nth-child(1) > div > button[buttontype="secondary"].\!tw-text-primary-600:nth-child(4) {
+    @extend %vw-hide;
+}
+{{/if}}
+
+/* Hide the `Log in with passkey` settings */
+app-change-password app-webauthn-login-settings {
+    @extend %vw-hide;
+}
+
+/* Hide Log in with passkey on the login page */
+{{#if (webver ">=2025.5.1")}}
+.vw-passkey-login {
+    @extend %vw-hide;
+}
+{{else}}
+app-root ng-component > form > div:nth-child(1) > div > button[buttontype="secondary"].\!tw-text-primary-600:nth-child(3) {
+    @extend %vw-hide;
+}
+{{/if}}
+
+/* Hide the or text followed by the two buttons hidden above */
+{{#if (webver ">=2025.5.1")}}
+.vw-or-text {
+    @extend %vw-hide;
+}
+{{else}}
+app-root ng-component > form > div:nth-child(1) > div:nth-child(3) > div:nth-child(2) {
+    @extend %vw-hide;
+}
+{{/if}}
+
+/* Hide the `Other` button on the login page */
+.vw-other-login {
+    @extend %vw-hide;
+}
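The `{{#if (webver ...)}}` guards above switch selectors based on the bundled web-vault version, so newer vaults can use stable `vw-*` hook classes while older ones fall back to brittle structural selectors. A minimal sketch of such a version gate, assuming the `semver` crate; the function name is illustrative and the actual `webver` helper wiring may differ:

use semver::{Version, VersionReq};

// Returns true when `web_vault_version` satisfies `requirement`,
// e.g. webver_matches("2025.5.1", ">=2025.5.1") == true.
fn webver_matches(web_vault_version: &str, requirement: &str) -> bool {
    match (Version::parse(web_vault_version), VersionReq::parse(requirement)) {
        (Ok(version), Ok(req)) => req.matches(&version),
        _ => false,
    }
}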
@@ -31,29 +73,13 @@ a[href$="/settings/two-factor"] {
     @extend %vw-hide;
 }
 
-/* Hide Business Owned checkbox */
-app-org-info > form:nth-child(1) > div:nth-child(3) {
-    @extend %vw-hide;
-}
-
-/* Hide the `This account is owned by a business` checkbox and label */
-#ownedBusiness,
-label[for^="ownedBusiness"] {
-    @extend %vw-hide;
-}
-
-/* Hide Business Name */
-app-org-account form div bit-form-field.tw-block:nth-child(3) {
-    @extend %vw-hide;
-}
-
 /* Hide organization plans */
 app-organization-plans > form > bit-section:nth-child(2) {
     @extend %vw-hide;
 }
 
 /* Hide Collection Management Form */
-app-org-account form.ng-untouched:nth-child(6) {
+app-org-account form.ng-untouched:nth-child(5) {
     @extend %vw-hide;
 }
@@ -73,11 +99,6 @@ bit-dialog div.tw-col-span-4:has(input[formcontrolname*="access"], input[formcon
     @extend %vw-hide;
 }
 
-/* Hide Log in with passkey */
-app-login div.tw-flex:nth-child(4) {
-    @extend %vw-hide;
-}
-
 /* Change collapsed menu icon to Vaultwarden */
 bit-nav-logo bit-nav-item a:before {
     content: "";
@@ -93,23 +114,34 @@ bit-nav-logo bit-nav-item .bwi-shield {
 /**** END Static Vaultwarden Changes ****/
 /**** START Dynamic Vaultwarden Changes ****/
 {{#if signup_disabled}}
+/* From web vault 2025.1.2 and onwards, the signup button is hidden
+   when signups are disabled as the web vault checks the /api/config endpoint.
+   Note that the clients tend to cache this endpoint for about 1 hour, so it might
+   take a while for the change to take effect. To avoid the button appearing
+   when it shouldn't, we'll keep this style in place for a couple of versions */
 /* Hide the register link on the login screen */
+{{#if (webver "<2025.3.0")}}
 app-login form div + div + div + div + hr,
 app-login form div + div + div + div + hr + p {
     @extend %vw-hide;
 }
+{{else}}
+app-root a[routerlink="/signup"] {
+    @extend %vw-hide;
+}
+{{/if}}
 {{/if}}
 
-{{#unless mail_enabled}}
+{{#unless mail_2fa_enabled}}
 /* Hide `Email` 2FA if mail is not enabled */
-app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(1) {
+.providers-2fa-1 {
     @extend %vw-hide;
 }
 {{/unless}}
 
 {{#unless yubico_enabled}}
 /* Hide `YubiKey OTP security key` 2FA if it is not enabled */
-app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(4) {
+.providers-2fa-3 {
     @extend %vw-hide;
 }
 {{/unless}}
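This stylesheet is itself a Handlebars template: values such as `signup_disabled`, `mail_2fa_enabled`, and `yubico_enabled` are substituted at render time, so the emitted CSS matches the server's configuration. A hedged sketch of that rendering step using the `handlebars` and `serde_json` crates; the template name and flag values here are illustrative, not vaultwarden's actual wiring:

use handlebars::Handlebars;
use serde_json::json;

// Render the templated stylesheet with the current server capabilities.
fn render_stylesheet(hb: &Handlebars) -> Result<String, handlebars::RenderError> {
    hb.render(
        "scss/vaultwarden.scss", // illustrative template name
        &json!({
            "signup_disabled": true,
            "mail_2fa_enabled": false,
            "yubico_enabled": false,
        }),
    )
}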
src/util.rs
@@ -16,7 +16,7 @@ use tokio::{
     time::{sleep, Duration},
 };
 
-use crate::CONFIG;
+use crate::{config::PathType, CONFIG};
 
 pub struct AppHeaders();
@@ -56,60 +56,73 @@ impl Fairing for AppHeaders {
         res.set_raw_header("X-Content-Type-Options", "nosniff");
         res.set_raw_header("X-Robots-Tag", "noindex, nofollow");
 
-        if !res.headers().get_one("Content-Type").is_some_and(|v| v.starts_with("image/")) {
-            res.set_raw_header("Cross-Origin-Resource-Policy", "same-origin");
-        }
-
         // Obsolete in modern browsers, unsafe (XS-Leak), and largely replaced by CSP
         res.set_raw_header("X-XSS-Protection", "0");
 
+        // The `Cross-Origin-Resource-Policy` header should not be set on images or on the `icon_external` route.
+        // Otherwise some clients, like the Bitwarden Desktop, will fail to download the icons.
+        let mut is_image = true;
+        if !(res.headers().get_one("Content-Type").is_some_and(|v| v.starts_with("image/"))
+            || req.route().is_some_and(|v| v.name.as_deref() == Some("icon_external")))
+        {
+            is_image = false;
+            res.set_raw_header("Cross-Origin-Resource-Policy", "same-origin");
+        }
+
         // Do not send the Content-Security-Policy (CSP) header and X-Frame-Options for the *-connector.html files.
         // Doing so can cause issues when an MFA request needs to open a popup or page within the clients, like WebAuthn or Duo.
         // This is the same behavior as upstream Bitwarden.
         if !req_uri_path.ends_with("connector.html") {
-            // # Frame Ancestors:
-            // Chrome Web Store: https://chrome.google.com/webstore/detail/bitwarden-free-password-m/nngceckbapebfimnlniiiahkandclblb
-            // Edge Add-ons: https://microsoftedge.microsoft.com/addons/detail/bitwarden-free-password/jbkfoedolllekgbhcbcoahefnbanhhlh?hl=en-US
-            // Firefox Browser Add-ons: https://addons.mozilla.org/en-US/firefox/addon/bitwarden-password-manager/
-            // # img/child/frame src:
-            // Have I Been Pwned to allow those calls to work.
-            // # Connect src:
-            // Leaked Passwords check: api.pwnedpasswords.com
-            // 2FA/MFA Site check: api.2fa.directory
-            // # Mail Relay: https://bitwarden.com/blog/add-privacy-and-security-using-email-aliases-with-bitwarden/
-            // app.simplelogin.io, app.addy.io, api.fastmail.com, quack.duckduckgo.com
-            let csp = format!(
-                "default-src 'none'; \
-                font-src 'self'; \
-                manifest-src 'self'; \
-                base-uri 'self'; \
-                form-action 'self'; \
-                object-src 'self' blob:; \
-                script-src 'self' 'wasm-unsafe-eval'; \
-                style-src 'self' 'unsafe-inline'; \
-                child-src 'self' https://*.duosecurity.com https://*.duofederal.com; \
-                frame-src 'self' https://*.duosecurity.com https://*.duofederal.com; \
-                frame-ancestors 'self' \
-                  chrome-extension://nngceckbapebfimnlniiiahkandclblb \
-                  chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh \
-                  moz-extension://* \
-                  {allowed_iframe_ancestors}; \
-                img-src 'self' data: \
-                  https://haveibeenpwned.com \
-                  {icon_service_csp}; \
-                connect-src 'self' \
-                  https://api.pwnedpasswords.com \
-                  https://api.2fa.directory \
-                  https://app.simplelogin.io/api/ \
-                  https://app.addy.io/api/ \
-                  https://api.fastmail.com/ \
-                  https://api.forwardemail.net \
-                  {allowed_connect_src};\
-                ",
-                icon_service_csp = CONFIG._icon_service_csp(),
-                allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors(),
-                allowed_connect_src = CONFIG.allowed_connect_src(),
-            );
+            let csp = if is_image {
+                // Prevent scripts, frames, objects, etc., from loading with images, mainly for SVG images, since these could contain JavaScript and other unsafe items.
+                // Even though we sanitize SVG images before storing and viewing them, it's better to prevent allowing these elements.
+                String::from("default-src 'none'; img-src 'self' data:; style-src 'unsafe-inline'; script-src 'none'; frame-src 'none'; object-src 'none'")
+            } else {
+                // # Frame Ancestors:
+                // Chrome Web Store: https://chrome.google.com/webstore/detail/bitwarden-free-password-m/nngceckbapebfimnlniiiahkandclblb
+                // Edge Add-ons: https://microsoftedge.microsoft.com/addons/detail/bitwarden-free-password/jbkfoedolllekgbhcbcoahefnbanhhlh?hl=en-US
+                // Firefox Browser Add-ons: https://addons.mozilla.org/en-US/firefox/addon/bitwarden-password-manager/
+                // # img/child/frame src:
+                // Have I Been Pwned to allow those calls to work.
+                // # Connect src:
+                // Leaked Passwords check: api.pwnedpasswords.com
+                // 2FA/MFA Site check: api.2fa.directory
+                // # Mail Relay: https://bitwarden.com/blog/add-privacy-and-security-using-email-aliases-with-bitwarden/
+                // app.simplelogin.io, app.addy.io, api.fastmail.com, api.forwardemail.net
+                format!(
+                    "default-src 'none'; \
+                    font-src 'self'; \
+                    manifest-src 'self'; \
+                    base-uri 'self'; \
+                    form-action 'self'; \
+                    object-src 'self' blob:; \
+                    script-src 'self' 'wasm-unsafe-eval'; \
+                    style-src 'self' 'unsafe-inline'; \
+                    child-src 'self' https://*.duosecurity.com https://*.duofederal.com; \
+                    frame-src 'self' https://*.duosecurity.com https://*.duofederal.com; \
+                    frame-ancestors 'self' \
+                      chrome-extension://nngceckbapebfimnlniiiahkandclblb \
+                      chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh \
+                      moz-extension://* \
+                      {allowed_iframe_ancestors}; \
+                    img-src 'self' data: \
+                      https://haveibeenpwned.com \
+                      {icon_service_csp}; \
+                    connect-src 'self' \
+                      https://api.pwnedpasswords.com \
+                      https://api.2fa.directory \
+                      https://app.simplelogin.io/api/ \
+                      https://app.addy.io/api/ \
+                      https://api.fastmail.com/ \
+                      https://api.forwardemail.net \
+                      {allowed_connect_src};\
+                    ",
+                    icon_service_csp = CONFIG._icon_service_csp(),
+                    allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors(),
+                    allowed_connect_src = CONFIG.allowed_connect_src(),
+                )
+            };
 
             res.set_raw_header("Content-Security-Policy", csp);
             res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
         } else {
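The `is_image` flag above drives two decisions at once: image responses skip the `Cross-Origin-Resource-Policy` header and receive a locked-down CSP that forbids scripts, frames, and objects (important for SVGs, which can embed JavaScript). Reduced to a pure function for clarity; this is a sketch, not vaultwarden's API:

// An image response is one served with an image/* Content-Type, or one
// coming from the `icon_external` route, which hands out external icons.
fn is_image_response(content_type: Option<&str>, route_name: Option<&str>) -> bool {
    content_type.is_some_and(|v| v.starts_with("image/")) || route_name == Some("icon_external")
}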
@@ -264,8 +277,8 @@ impl Fairing for BetterLogging {
         } else {
             "http"
         };
-        let addr = format!("{}://{}:{}", &scheme, &config.address, &config.port);
-        info!(target: "start", "Rocket has launched from {}", addr);
+        let addr = format!("{scheme}://{}:{}", &config.address, &config.port);
+        info!(target: "start", "Rocket has launched from {addr}");
     }
 
     async fn on_request(&self, request: &mut Request<'_>, _data: &mut Data<'_>) {
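This and the following hunks replace positional `format!`/`info!`/`warn!` arguments with inline format arguments (captured identifiers, stable since Rust 1.58), which name the variable directly inside the braces. Both forms produce identical output:

fn main() {
    let scheme = "https";
    let port = 443;
    // Positional arguments and inline (captured) arguments are equivalent:
    assert_eq!(
        format!("{}://localhost:{}", scheme, port),
        format!("{scheme}://localhost:{port}")
    );
}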
@@ -279,8 +292,8 @@ impl Fairing for BetterLogging {
         let uri_subpath = uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str);
         if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) {
             match uri.query() {
-                Some(q) => info!(target: "request", "{} {}?{}", method, uri_path_str, &q[..q.len().min(30)]),
-                None => info!(target: "request", "{} {}", method, uri_path_str),
+                Some(q) => info!(target: "request", "{method} {uri_path_str}?{}", &q[..q.len().min(30)]),
+                None => info!(target: "request", "{method} {uri_path_str}"),
             };
         }
     }
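The `&q[..q.len().min(30)]` slice caps the logged query string at 30 bytes so logs stay short and avoid leaking long secrets. The same clamp as a standalone sketch; note that byte slicing panics on a non-ASCII char boundary, so this (like the code above) assumes effectively ASCII query strings:

// Clamp a query string to at most `max` bytes before logging.
fn truncate_for_log(q: &str, max: usize) -> &str {
    &q[..q.len().min(max)]
}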
@@ -295,9 +308,9 @@ impl Fairing for BetterLogging {
         if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) {
             let status = response.status();
             if let Some(ref route) = request.route() {
-                info!(target: "response", "{} => {}", route, status)
+                info!(target: "response", "{route} => {status}")
             } else {
-                info!(target: "response", "{}", status)
+                info!(target: "response", "{status}")
             }
         }
     }
@@ -322,7 +335,7 @@ pub fn get_display_size(size: i64) -> String {
         }
     }
 
-    format!("{:.2} {}", size, UNITS[unit_counter])
+    format!("{size:.2} {}", UNITS[unit_counter])
 }
 
 pub fn get_uuid() -> String {
@@ -695,7 +708,7 @@ where
             return Err(e);
         }
 
-        warn!("Can't connect to database, retrying: {:?}", e);
+        warn!("Can't connect to database, retrying: {e:?}");
 
        sleep(Duration::from_millis(1_000)).await;
    }
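The surrounding retry loop (only partially visible in this hunk) returns the error once attempts are exhausted, otherwise warns and sleeps one second before trying again. The general shape, as a hedged sketch under those assumptions:

use std::time::Duration;
use tokio::time::sleep;

// Retry an async operation up to `attempts` times, sleeping 1s between tries.
async fn retry_async<F, Fut, T, E: std::fmt::Debug>(mut op: F, attempts: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut tried = 0;
    loop {
        match op().await {
            Ok(v) => return Ok(v),
            Err(e) => {
                tried += 1;
                if tried >= attempts {
                    return Err(e);
                }
                log::warn!("Can't connect to database, retrying: {e:?}");
                sleep(Duration::from_millis(1_000)).await;
            }
        }
    }
}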
@@ -748,9 +761,20 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
 
 /// Parses the experimental client feature flags string into a HashMap.
 pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags: &str) -> HashMap<String, bool> {
-    let feature_states = experimental_client_feature_flags.split(',').map(|f| (f.trim().to_owned(), true)).collect();
-
-    feature_states
+    // These flags can still be configured, but they are deprecated and no longer used.
+    // To prevent old installations from failing to start, filter them out instead of erroring out.
+    const DEPRECATED_FLAGS: &[&str] =
+        &["autofill-overlay", "autofill-v2", "browser-fileless-import", "extension-refresh", "fido2-vault-credentials"];
+    experimental_client_feature_flags
+        .split(',')
+        .filter_map(|f| {
+            let flag = f.trim();
+            if !flag.is_empty() && !DEPRECATED_FLAGS.contains(&flag) {
+                return Some((flag.to_owned(), true));
+            }
+            None
+        })
+        .collect()
 }
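With this rewrite, deprecated flag names are silently dropped instead of failing startup, and empty entries are ignored. A usage sketch; "ssh-agent" is just an illustrative flag name, while "fido2-vault-credentials" is one of the deprecated names listed above:

let flags = parse_experimental_client_feature_flags("ssh-agent, fido2-vault-credentials, ");
assert_eq!(flags.get("ssh-agent"), Some(&true));         // kept
assert!(!flags.contains_key("fido2-vault-credentials")); // deprecated, filtered out
assert_eq!(flags.len(), 1);                              // empty trailing entry ignored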
 
 /// TODO: This is extracted from IpAddr::is_global, which is unstable:
@@ -812,6 +836,26 @@ pub fn is_global(ip: std::net::IpAddr) -> bool {
     ip.is_global()
 }
 
+/// Saves a Rocket temporary file to the OpenDAL Operator at the given path.
+pub async fn save_temp_file(
+    path_type: PathType,
+    path: &str,
+    temp_file: rocket::fs::TempFile<'_>,
+    overwrite: bool,
+) -> Result<(), crate::Error> {
+    use futures::AsyncWriteExt as _;
+    use tokio_util::compat::TokioAsyncReadCompatExt as _;
+
+    let operator = CONFIG.opendal_operator_for_path_type(path_type)?;
+
+    let mut read_stream = temp_file.open().await?.compat();
+    let mut writer = operator.writer_with(path).if_not_exists(!overwrite).await?.into_futures_async_write();
+    futures::io::copy(&mut read_stream, &mut writer).await?;
+    writer.close().await?;
+
+    Ok(())
+}
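A hedged usage sketch of the new helper, e.g. from an upload handler; `PathType::Attachments` and the surrounding names are assumptions based on the `config::PathType` import added earlier, not confirmed API:

// Persist an uploaded file through the configured OpenDAL backend
// (local filesystem by default, S3 when built with the `s3` feature).
async fn store_attachment(data: rocket::fs::TempFile<'_>, file_id: &str) -> Result<(), crate::Error> {
    crate::util::save_temp_file(
        crate::config::PathType::Attachments, // assumed variant name
        file_id,
        data,
        true, // overwrite if the object already exists
    )
    .await
}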
 
 /// These are some tests to check that the implementations match
 /// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
 /// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct