Compare commits


1 commit

Author: Mathijs van Veluw
SHA1: 0a77776a8b
Message: Revert "Disable deployments for release env (#7033)"
This reverts commit 8f0e99b875.
Date: 2026-04-12 16:57:48 +02:00
61 changed files with 578 additions and 1491 deletions
+1
@@ -1,2 +1,3 @@
# Ignore vendored scripts in GitHub stats
src/static/scripts/* linguist-vendored
+2 -6
@@ -38,9 +38,7 @@ jobs:
docker-build:
name: Build Vaultwarden containers
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
environment:
name: release
deployment: false
environment: release
permissions:
packages: write # Needed to upload packages and artifacts
contents: read
@@ -251,9 +249,7 @@ jobs:
name: Merge manifests
runs-on: ubuntu-latest
needs: docker-build
environment:
name: release
deployment: false
environment: release
permissions:
packages: write # Needed to upload packages and artifacts
attestations: write # Needed to generate an artifact attestation for a build
+2 -2
@@ -38,7 +38,7 @@ jobs:
persist-credentials: false
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@ed142fd0673e97e23eac54620cfb913e5ce36c25 # v0.36.0
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
env:
TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1
@@ -50,6 +50,6 @@ jobs:
severity: CRITICAL,HIGH
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1
with:
sarif_file: 'trivy-results.sarif'
+1 -1
@@ -23,4 +23,4 @@ jobs:
# When this version is updated, do not forget to update this in `.pre-commit-config.yaml` too
- name: Spell Check Repo
uses: crate-ci/typos@7c572958218557a3272c2d6719629443b5cc26fd # v1.45.2
uses: crate-ci/typos@02ea592e44b3a53c302f697cddca7641cd051c3d # v1.45.0
+1 -1
@@ -24,7 +24,7 @@ jobs:
persist-credentials: false
- name: Run zizmor
uses: zizmorcore/zizmor-action@b1d7e1fb5de872772f31590499237e7cce841e8e # v0.5.3
uses: zizmorcore/zizmor-action@71321a20a9ded102f6e9ce5718a2fcec2c4f70d8 # v0.5.2
with:
# intentionally not scanning the entire repository,
# since it contains integration tests.
+53 -55
@@ -1,60 +1,58 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # v6.0.0
hooks:
- id: check-yaml
- id: check-json
- id: check-toml
- id: mixed-line-ending
args: [ "--fix=no" ]
- id: end-of-file-fixer
exclude: "(.*js$|.*css$)"
- id: check-case-conflict
- id: check-merge-conflict
- id: detect-private-key
- id: check-symlinks
- id: forbid-submodules
# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too
- repo: https://github.com/crate-ci/typos
rev: 7c572958218557a3272c2d6719629443b5cc26fd # v1.45.2
- id: check-yaml
- id: check-json
- id: check-toml
- id: mixed-line-ending
args: ["--fix=no"]
- id: end-of-file-fixer
exclude: "(.*js$|.*css$)"
- id: check-case-conflict
- id: check-merge-conflict
- id: detect-private-key
- id: check-symlinks
- id: forbid-submodules
- repo: local
hooks:
- id: typos
- repo: local
hooks:
- id: fmt
name: fmt
description: Format files with cargo fmt.
entry: cargo fmt
language: system
always_run: true
pass_filenames: false
args: [ "--", "--check" ]
- id: cargo-test
name: cargo test
description: Test the package for errors.
entry: cargo test
language: system
args: [ "--features", "sqlite,mysql,postgresql", "--" ]
types_or: [ rust, file ]
files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
pass_filenames: false
- id: cargo-clippy
name: cargo clippy
description: Lint Rust sources
entry: cargo clippy
language: system
args: [ "--features", "sqlite,mysql,postgresql", "--", "-D", "warnings" ]
types_or: [ rust, file ]
files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
pass_filenames: false
- id: check-docker-templates
name: check docker templates
description: Check if the Docker templates are updated
language: system
entry: sh
args:
- "-c"
- "cd docker && make"
- id: fmt
name: fmt
description: Format files with cargo fmt.
entry: cargo fmt
language: system
always_run: true
pass_filenames: false
args: ["--", "--check"]
- id: cargo-test
name: cargo test
description: Test the package for errors.
entry: cargo test
language: system
args: ["--features", "sqlite,mysql,postgresql", "--"]
types_or: [rust, file]
files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
pass_filenames: false
- id: cargo-clippy
name: cargo clippy
description: Lint Rust sources
entry: cargo clippy
language: system
args: ["--features", "sqlite,mysql,postgresql", "--", "-D", "warnings"]
types_or: [rust, file]
files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
pass_filenames: false
- id: check-docker-templates
name: check docker templates
description: Check if the Docker templates are updated
language: system
entry: sh
args:
- "-c"
- "cd docker && make"
# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too
- repo: https://github.com/crate-ci/typos
rev: 02ea592e44b3a53c302f697cddca7641cd051c3d # v1.45.0
hooks:
- id: typos
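For reference, a hook configuration like the one above is normally exercised locally with the standard pre-commit CLI: a one-time `pre-commit install` to register the git hook, then `pre-commit run --all-files` to run every hook against the whole tree.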
-2
@@ -23,6 +23,4 @@ extend-ignore-re = [
# https://github.com/bitwarden/server/blob/dff9f1cf538198819911cf2c20f8cda3307701c5/src/Notifications/HubHelpers.cs#L86
# https://github.com/bitwarden/clients/blob/9612a4ac45063e372a6fbe87eb253c7cb3c588fb/libs/common/src/auth/services/anonymous-hub.service.ts#L45
"AuthRequestResponseRecieved",
# Ignore Punycode/IDN tests
"xn--.+"
]
Generated
+208 -411
File diff suppressed because it is too large.
+20 -24
@@ -1,6 +1,6 @@
[workspace.package]
edition = "2021"
rust-version = "1.93.0"
rust-version = "1.92.0"
license = "AGPL-3.0-only"
repository = "https://github.com/dani-garcia/vaultwarden"
publish = false
@@ -23,17 +23,15 @@ publish.workspace = true
[features]
default = [
# "sqlite" or "sqlite_system",
# "sqlite",
# "mysql",
# "postgresql",
]
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
# Please enable at least one of these DB backends.
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite_system = ["diesel/sqlite", "diesel_migrations/sqlite"]
sqlite = ["sqlite_system", "libsqlite3-sys/bundled"] # Alternative to the above, statically linked SQLite into the binary instead of dynamically.
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable MiMalloc memory allocator to replace the default malloc
@@ -81,7 +79,7 @@ dashmap = "6.1.0"
# Async futures
futures = "0.3.32"
tokio = { version = "1.52.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio = { version = "1.51.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio-util = { version = "0.7.18", features = ["compat"]}
# A generic serialization/deserialization framework
@@ -90,14 +88,14 @@ serde_json = "1.0.149"
# A safe, extensible ORM and Query builder
# Currently pinned diesel to v2.3.3 as newer version break MySQL/MariaDB compatibility
diesel = { version = "2.3.9", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.3.2"
diesel = { version = "2.3.7", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.3.1"
derive_more = { version = "2.1.1", features = ["from", "into", "as_ref", "deref", "display"] }
diesel-derive-newtype = "2.1.2"
# SQLite, statically bundled unless the `sqlite_system` feature is enabled
libsqlite3-sys = { version = "0.37.0", optional = true }
# Bundled/Static SQLite
libsqlite3-sys = { version = "0.36.0", features = ["bundled"], optional = true }
# Crypto-related libraries
rand = "0.10.1"
@@ -105,7 +103,7 @@ ring = "0.17.14"
subtle = "2.6.1"
# UUID generation
uuid = { version = "1.23.1", features = ["v4"] }
uuid = { version = "1.23.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.44", features = ["clock", "serde"], default-features = false }
@@ -116,7 +114,7 @@ time = "0.3.47"
job_scheduler_ng = "2.4.0"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.11.0"
data-encoding = "2.10.0"
# JWT library
jsonwebtoken = { version = "10.3.0", features = ["use_pem", "rust_crypto"], default-features = false }
@@ -130,9 +128,9 @@ yubico = { package = "yubico_ng", version = "0.14.1", features = ["online-tokio"
# WebAuthn libraries
# danger-allow-state-serialisation is needed to save the state in the db
# danger-credential-internals is needed to support U2F to Webauthn migration
webauthn-rs = { version = "0.5.5", features = ["danger-allow-state-serialisation", "danger-credential-internals"] }
webauthn-rs-proto = "0.5.5"
webauthn-rs-core = "0.5.5"
webauthn-rs = { version = "0.5.4", features = ["danger-allow-state-serialisation", "danger-credential-internals"] }
webauthn-rs-proto = "0.5.4"
webauthn-rs-core = "0.5.4"
# Handling of URL's for WebAuthn and favicons
url = "2.5.8"
@@ -147,7 +145,7 @@ handlebars = { version = "6.4.0", features = ["dir_source"] }
# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.12.28", features = ["rustls-tls", "rustls-tls-native-roots", "stream", "json", "deflate", "gzip", "brotli", "zstd", "socks", "cookies", "charset", "http2", "system-proxy"], default-features = false}
hickory-resolver = "0.26.1"
hickory-resolver = "0.25.2"
# Favicon extraction libraries
html5gum = "0.8.3"
@@ -164,13 +162,13 @@ cookie = "0.18.1"
cookie_store = "0.22.1"
# Used by U2F, JWT and PostgreSQL
openssl = "0.10.78"
openssl = "0.10.76"
# CLI argument parsing
pico-args = "0.5.0"
# Macro ident concatenation
pastey = "0.2.2"
pastey = "0.2.1"
governor = "0.10.4"
# OIDC for SSO
@@ -182,7 +180,7 @@ semver = "1.0.28"
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.50", features = ["secure"], default-features = false, optional = true }
mimalloc = { version = "0.1.48", features = ["secure"], default-features = false, optional = true }
which = "8.0.2"
@@ -190,7 +188,7 @@ which = "8.0.2"
argon2 = "0.5.3"
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
rpassword = "7.5.1"
rpassword = "7.4.0"
# Loading a dynamic CSS Stylesheet
grass_compiler = { version = "0.13.4", default-features = false }
@@ -200,9 +198,9 @@ opendal = { version = "0.55.0", features = ["services-fs"], default-features = f
# For retrieving AWS credentials, including temporary SSO credentials
anyhow = { version = "1.0.102", optional = true }
aws-config = { version = "1.8.16", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
aws-config = { version = "1.8.15", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
aws-credential-types = { version = "1.2.14", optional = true }
aws-smithy-runtime-api = { version = "1.12.0", optional = true }
aws-smithy-runtime-api = { version = "1.11.6", optional = true }
http = { version = "1.4.0", optional = true }
reqsign = { version = "0.16.5", optional = true }
@@ -303,7 +301,6 @@ branches_sharing_code = "deny"
case_sensitive_file_extension_comparisons = "deny"
cast_lossless = "deny"
clone_on_ref_ptr = "deny"
duration_suboptimal_units = "deny"
equatable_if_let = "deny"
excessive_precision = "deny"
filter_map_next = "deny"
@@ -325,7 +322,6 @@ needless_continue = "deny"
needless_lifetimes = "deny"
option_option = "deny"
redundant_clone = "deny"
ref_option = "deny"
string_add_assign = "deny"
unnecessary_join = "deny"
unnecessary_self_imports = "deny"
+2 -3
@@ -59,9 +59,8 @@ A nearly complete implementation of the Bitwarden Client API is provided, includ
## Usage
> [!IMPORTANT]
> The web-vault requires the use of HTTPS and a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API). <br>
> That means it will only work if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS). <br>
> We also suggest to use a [reverse proxy](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples).
> The web-vault requires the use a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API).
> That means it will only work via `http://localhost:8000` (using the port from the example below) or if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS).
The recommended way to install and use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).
See [which container image to use](https://github.com/dani-garcia/vaultwarden/wiki/Which-container-image-to-use) for an explanation of the provided tags.
+6 -6
@@ -2,21 +2,21 @@ use std::env;
use std::process::Command;
fn main() {
// These allow using e.g. #[cfg(mysql)] instead of #[cfg(feature = "mysql")], which helps when trying to add them through macros
#[cfg(feature = "sqlite_system")] // The `sqlite` feature implies this one.
// This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
#[cfg(feature = "sqlite")]
println!("cargo:rustc-cfg=sqlite");
#[cfg(feature = "mysql")]
println!("cargo:rustc-cfg=mysql");
#[cfg(feature = "postgresql")]
println!("cargo:rustc-cfg=postgresql");
#[cfg(not(any(feature = "sqlite_system", feature = "mysql", feature = "postgresql")))]
#[cfg(feature = "s3")]
println!("cargo:rustc-cfg=s3");
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
compile_error!(
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
);
#[cfg(feature = "s3")]
println!("cargo:rustc-cfg=s3");
// Use check-cfg to let cargo know which cfg's we define,
// and avoid warnings when they are used in the code.
println!("cargo::rustc-check-cfg=cfg(sqlite)");
+1 -1
@@ -2,4 +2,4 @@
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/db/schema.rs"
file = "src/db/schema.rs"
+3 -3
@@ -1,11 +1,11 @@
---
vault_version: "v2026.4.1"
vault_image_digest: "sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe"
vault_version: "v2026.2.0"
vault_image_digest: "sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447"
# Cross Compile Docker Helper Scripts v1.9.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
xx_image_digest: "sha256:c64defb9ed5a91eacb37f96ccc3d4cd72521c4bd18d5442905b95e2226b0e707"
rust_version: 1.95.0 # Rust version to be used
rust_version: 1.94.1 # Rust version to be used
debian_version: trixie # Debian release name to be used
alpine_version: "3.23" # Alpine version to be used
# For which platforms/architectures will we try to build images
+10 -10
@@ -19,23 +19,23 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2026.4.1
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.4.1
# [docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe]
# $ docker pull docker.io/vaultwarden/web-vault:v2026.2.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.2.0
# [docker.io/vaultwarden/web-vault@sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe
# [docker.io/vaultwarden/web-vault:v2026.4.1]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447
# [docker.io/vaultwarden/web-vault:v2026.2.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447 AS vault
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 and linux/arm64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.95.0 AS build_amd64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.95.0 AS build_arm64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.95.0 AS build_armv7
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.95.0 AS build_armv6
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.94.1 AS build_amd64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.94.1 AS build_arm64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.94.1 AS build_armv7
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.94.1 AS build_armv6
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
+7 -7
@@ -19,15 +19,15 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2026.4.1
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.4.1
# [docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe]
# $ docker pull docker.io/vaultwarden/web-vault:v2026.2.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.2.0
# [docker.io/vaultwarden/web-vault@sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe
# [docker.io/vaultwarden/web-vault:v2026.4.1]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447
# [docker.io/vaultwarden/web-vault:v2026.2.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:37c8661fa59dcdfbd3baa8366b6e950ef292b15adfeff1f57812b075c1fd3447 AS vault
########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
@@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c64defb9ed5a91eacb37f
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.95.0-slim-trixie AS build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.94.1-slim-trixie AS build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
@@ -1 +0,0 @@
DROP TABLE IF EXISTS archives;
@@ -1,10 +0,0 @@
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL,
cipher_uuid CHAR(36) NOT NULL,
archived_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (user_uuid, cipher_uuid),
FOREIGN KEY (user_uuid) REFERENCES users (uuid) ON DELETE CASCADE,
FOREIGN KEY (cipher_uuid) REFERENCES ciphers (uuid) ON DELETE CASCADE
);
@@ -1 +0,0 @@
ALTER TABLE sso_auth DROP COLUMN binding_hash;
@@ -1 +0,0 @@
ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;
@@ -1 +0,0 @@
DROP TABLE IF EXISTS archives;
@@ -1,8 +0,0 @@
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
archived_at TIMESTAMP NOT NULL DEFAULT now(),
PRIMARY KEY (user_uuid, cipher_uuid)
);
@@ -1 +0,0 @@
ALTER TABLE sso_auth DROP COLUMN binding_hash;
@@ -1 +0,0 @@
ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;
@@ -1 +0,0 @@
DROP TABLE IF EXISTS archives;
@@ -1,8 +0,0 @@
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
archived_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (user_uuid, cipher_uuid)
);
@@ -1 +0,0 @@
ALTER TABLE sso_auth DROP COLUMN binding_hash;
@@ -1 +0,0 @@
ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;
+1 -1
@@ -1,4 +1,4 @@
[toolchain]
channel = "1.95.0"
channel = "1.94.1"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
+3 -12
@@ -30,7 +30,6 @@ use crate::{
error::{Error, MapResult},
http_client::make_http_request,
mail,
sso::FAKE_SSO_IDENTIFIER,
util::{
container_base_image, format_naive_datetime_local, get_active_web_release, get_display_size,
is_running_in_container, parse_experimental_client_feature_flags, FeatureFlagFilter, NumberOrString,
@@ -316,11 +315,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -
async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult {
if CONFIG.mail_enabled() {
let org_id: OrganizationId = if CONFIG.sso_enabled() {
FAKE_SSO_IDENTIFIER.into()
} else {
FAKE_ADMIN_UUID.into()
};
let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
mail::send_invite(user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
} else {
@@ -469,7 +464,7 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti
if CONFIG.push_enabled() {
for device in Device::find_push_devices_by_user(&user.uuid, &conn).await {
match unregister_push_device(device.push_uuid.as_ref()).await {
match unregister_push_device(&device.push_uuid).await {
Ok(r) => r,
Err(e) => error!("Unable to unregister devices from Bitwarden server: {e}"),
};
@@ -523,11 +518,7 @@ async fn resend_user_invite(user_id: UserId, _token: AdminToken, conn: DbConn) -
}
if CONFIG.mail_enabled() {
let org_id: OrganizationId = if CONFIG.sso_enabled() {
FAKE_SSO_IDENTIFIER.into()
} else {
FAKE_ADMIN_UUID.into()
};
let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
mail::send_invite(&user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
} else {
+10 -10
@@ -137,7 +137,7 @@ struct KeysData {
}
/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: Option<&String>) -> Option<String> {
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
match password_hint {
None => None,
Some(h) => match h.trim() {
@@ -147,7 +147,7 @@ fn clean_password_hint(password_hint: Option<&String>) -> Option<String> {
}
}
fn enforce_password_hint_setting(password_hint: Option<&String>) -> EmptyResult {
fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
if password_hint.is_some() && !CONFIG.password_hints_allowed() {
err!("Password hints have been disabled by the administrator. Remove the hint and try again.");
}
@@ -245,8 +245,8 @@ pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn:
// Check against the password hint setting here so if it fails, the user
// can retry without losing their invitation below.
let password_hint = clean_password_hint(data.master_password_hint.as_ref());
enforce_password_hint_setting(password_hint.as_ref())?;
let password_hint = clean_password_hint(&data.master_password_hint);
enforce_password_hint_setting(&password_hint)?;
let mut user = match User::find_by_mail(&email, &conn).await {
Some(user) => {
@@ -353,8 +353,8 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
// Check against the password hint setting here so if it fails,
// the user can retry without losing their invitation below.
let password_hint = clean_password_hint(data.master_password_hint.as_ref());
enforce_password_hint_setting(password_hint.as_ref())?;
let password_hint = clean_password_hint(&data.master_password_hint);
enforce_password_hint_setting(&password_hint)?;
set_kdf_data(&mut user, &data.kdf)?;
@@ -374,7 +374,7 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
}
if let Some(identifier) = data.org_identifier {
if identifier != crate::sso::FAKE_SSO_IDENTIFIER && identifier != crate::api::admin::FAKE_ADMIN_UUID {
if identifier != crate::sso::FAKE_IDENTIFIER && identifier != crate::api::admin::FAKE_ADMIN_UUID {
let Some(org) = Organization::find_by_uuid(&identifier.into(), &conn).await else {
err!("Failed to retrieve the associated organization")
};
@@ -515,8 +515,8 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, conn: DbCon
err!("Invalid password")
}
user.password_hint = clean_password_hint(data.master_password_hint.as_ref());
enforce_password_hint_setting(user.password_hint.as_ref())?;
user.password_hint = clean_password_hint(&data.master_password_hint);
enforce_password_hint_setting(&user.password_hint)?;
log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn)
.await;
@@ -1438,7 +1438,7 @@ async fn put_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResul
if let Some(device) = Device::find_by_uuid(&device_id, &conn).await {
Device::clear_push_token_by_uuid(&device_id, &conn).await?;
unregister_push_device(device.push_uuid.as_ref()).await?;
unregister_push_device(&device.push_uuid).await?;
}
Ok(())
+16 -189
@@ -19,9 +19,9 @@ use crate::{
crypto,
db::{
models::{
Archive, Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup,
CollectionId, CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership,
MembershipType, OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId,
CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType,
OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
},
DbConn, DbPool,
},
@@ -96,10 +96,6 @@ pub fn routes() -> Vec<Route> {
post_collections_update,
post_collections_admin,
put_collections_admin,
archive_cipher_put,
archive_cipher_selected,
unarchive_cipher_put,
unarchive_cipher_selected,
]
}
@@ -297,7 +293,6 @@ pub struct CipherData {
// when using older client versions, or if the operation doesn't involve
// updating an existing cipher.
last_known_revision_date: Option<String>,
archived_date: Option<String>,
}
#[derive(Debug, Deserialize)]
@@ -539,13 +534,6 @@ pub async fn update_cipher_from_data(
cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;
if let Some(dt_str) = data.archived_date {
match NaiveDateTime::parse_from_str(&dt_str, "%+") {
Ok(dt) => cipher.set_archived_at(dt, &headers.user.uuid, conn).await?,
Err(err) => warn!("Error parsing ArchivedDate '{dt_str}': {err}"),
}
}
if ut != UpdateType::None {
// Only log events for organizational ciphers
if let Some(org_id) = &cipher.organization_uuid {
@@ -642,7 +630,7 @@ async fn post_ciphers_import(data: Json<ImportData>, headers: Headers, conn: DbC
let mut user = headers.user;
user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
Ok(())
}
@@ -814,16 +802,12 @@ async fn post_collections_update(
err!("Collection cannot be changed")
}
let Some(ref org_uuid) = cipher.organization_uuid else {
err!("Cipher is not owned by an organization")
};
let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
let current_collections =
HashSet::<CollectionId>::from_iter(cipher.get_collections(headers.user.uuid.clone(), &conn).await);
for collection in posted_collections.symmetric_difference(&current_collections) {
match Collection::find_by_uuid_and_org(collection, org_uuid, &conn).await {
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
@@ -854,7 +838,7 @@ async fn post_collections_update(
log_event(
EventType::CipherUpdatedCollections as i32,
&cipher.uuid,
org_uuid,
&cipher.organization_uuid.clone().unwrap(),
&headers.user.uuid,
headers.device.atype,
&headers.ip.ip,
@@ -894,16 +878,12 @@ async fn post_collections_admin(
err!("Collection cannot be changed")
}
let Some(ref org_uuid) = cipher.organization_uuid else {
err!("Cipher is not owned by an organization")
};
let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
let current_collections =
HashSet::<CollectionId>::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &conn).await);
for collection in posted_collections.symmetric_difference(&current_collections) {
match Collection::find_by_uuid_and_org(collection, org_uuid, &conn).await {
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
@@ -934,7 +914,7 @@ async fn post_collections_admin(
log_event(
EventType::CipherUpdatedCollections as i32,
&cipher.uuid,
org_uuid,
&cipher.organization_uuid.unwrap(),
&headers.user.uuid,
headers.device.atype,
&headers.ip.ip,
@@ -1025,7 +1005,7 @@ async fn put_cipher_share_selected(
}
// Multi share actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
Ok(())
}
@@ -1638,7 +1618,7 @@ async fn move_cipher_selected(
.await;
} else {
// Multi move actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
}
if cipher_count != accessible_ciphers_count {
@@ -1690,7 +1670,7 @@ async fn purge_org_vault(
match Membership::find_confirmed_by_user_and_org(&user.uuid, &organization.org_id, &conn).await {
Some(member) if member.atype == MembershipType::Owner => {
Cipher::delete_all_by_organization(&organization.org_id, &conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
log_event(
EventType::OrganizationPurgedVault as i32,
@@ -1730,41 +1710,11 @@ async fn purge_personal_vault(
}
user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
Ok(())
}
#[put("/ciphers/<cipher_id>/archive")]
async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
archive_cipher(&cipher_id, &headers, false, &conn, &nt).await
}
#[put("/ciphers/archive", data = "<data>")]
async fn archive_cipher_selected(
data: Json<CipherIdsData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
archive_multiple_ciphers(data, &headers, &conn, &nt).await
}
#[put("/ciphers/<cipher_id>/unarchive")]
async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
unarchive_cipher(&cipher_id, &headers, false, &conn, &nt).await
}
#[put("/ciphers/unarchive", data = "<data>")]
async fn unarchive_cipher_selected(
data: Json<CipherIdsData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
unarchive_multiple_ciphers(data, &headers, &conn, &nt).await
}
#[derive(PartialEq)]
pub enum CipherDeleteOptions {
SoftSingle,
@@ -1855,7 +1805,7 @@ async fn _delete_multiple_ciphers(
}
// Multi delete actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
Ok(())
}
@@ -1923,7 +1873,7 @@ async fn _restore_multiple_ciphers(
}
// Multi move actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
Ok(Json(json!({
"data": ciphers,
@@ -1983,122 +1933,6 @@ async fn _delete_cipher_attachment_by_id(
Ok(Json(json!({"cipher":cipher_json})))
}
async fn archive_cipher(
cipher_id: &CipherId,
headers: &Headers,
multi_archive: bool,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
err!("Cipher doesn't exist")
};
if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
err!("Cipher is not accessible for the current user")
}
cipher.set_archived_at(Utc::now().naive_utc(), &headers.user.uuid, conn).await?;
if !multi_archive {
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
&cipher,
&cipher.update_users_revision(conn).await,
&headers.device,
None,
conn,
)
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
async fn unarchive_cipher(
cipher_id: &CipherId,
headers: &Headers,
multi_unarchive: bool,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
err!("Cipher doesn't exist")
};
if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
err!("Cipher is not accessible for the current user")
}
cipher.unarchive(&headers.user.uuid, conn).await?;
if !multi_unarchive {
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
&cipher,
&cipher.update_users_revision(conn).await,
&headers.device,
None,
conn,
)
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
async fn archive_multiple_ciphers(
data: Json<CipherIdsData>,
headers: &Headers,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
let mut ciphers: Vec<Value> = Vec::new();
for cipher_id in data.ids {
match archive_cipher(&cipher_id, headers, true, conn, nt).await {
Ok(json) => ciphers.push(json.into_inner()),
err => return err,
}
}
// Multi archive does not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;
Ok(Json(json!({
"data": ciphers,
"object": "list",
"continuationToken": null
})))
}
async fn unarchive_multiple_ciphers(
data: Json<CipherIdsData>,
headers: &Headers,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
let mut ciphers: Vec<Value> = Vec::new();
for cipher_id in data.ids {
match unarchive_cipher(&cipher_id, headers, true, conn, nt).await {
Ok(json) => ciphers.push(json.into_inner()),
err => return err,
}
}
// Multi unarchive does not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;
Ok(Json(json!({
"data": ciphers,
"object": "list",
"continuationToken": null
})))
}
/// This will hold all the necessary data to improve a full sync of all the ciphers
/// It can be used during the `Cipher::to_json()` call.
/// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed.
@@ -2108,7 +1942,6 @@ pub struct CipherSyncData {
pub cipher_folders: HashMap<CipherId, FolderId>,
pub cipher_favorites: HashSet<CipherId>,
pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>,
pub cipher_archives: HashMap<CipherId, NaiveDateTime>,
pub members: HashMap<OrganizationId, Membership>,
pub user_collections: HashMap<CollectionId, CollectionUser>,
pub user_collections_groups: HashMap<CollectionId, CollectionGroup>,
@@ -2125,25 +1958,20 @@ impl CipherSyncData {
pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self {
let cipher_folders: HashMap<CipherId, FolderId>;
let cipher_favorites: HashSet<CipherId>;
let cipher_archives: HashMap<CipherId, NaiveDateTime>;
match sync_type {
// User Sync supports Folders, Favorites, and Archives
// User Sync supports Folders and Favorites
CipherSyncType::User => {
// Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect();
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect();
// Generate a HashMap with the Cipher UUID as key and the archived date time as value
cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect();
}
// Organization Sync does not support Folders, Favorites, or Archives.
// Organization Sync does not support Folders and Favorites.
// If these are set, it will cause issues in the web-vault.
CipherSyncType::Organization => {
cipher_folders = HashMap::with_capacity(0);
cipher_favorites = HashSet::with_capacity(0);
cipher_archives = HashMap::with_capacity(0);
}
}
@@ -2206,7 +2034,6 @@ impl CipherSyncData {
};
Self {
cipher_archives,
cipher_attachments,
cipher_folders,
cipher_favorites,
+3 -3
@@ -124,7 +124,7 @@ async fn post_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: Db
user.save(&conn).await?;
nt.send_user_update(UpdateType::SyncSettings, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &conn).await;
Ok(Json(json!({})))
}
@@ -204,11 +204,11 @@ fn config() -> Json<Value> {
// Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12
// Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31
// iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
let mut feature_states = parse_experimental_client_feature_flags(
let feature_states = parse_experimental_client_feature_flags(
&CONFIG.experimental_client_feature_flags(),
FeatureFlagFilter::ValidOnly,
);
feature_states.insert("pm-19148-innovation-archive".to_string(), true);
// Add default feature_states here if needed, currently no features are needed by default.
Json(json!({
// Note: The clients use this version to handle backwards compatibility concerns
+39 -43
@@ -20,8 +20,7 @@ use crate::{
DbConn,
},
mail,
sso::FAKE_SSO_IDENTIFIER,
util::{convert_json_key_lcase_first, NumberOrString},
util::{convert_json_key_lcase_first, get_uuid, NumberOrString},
CONFIG,
};
@@ -65,7 +64,6 @@ pub fn routes() -> Vec<Route> {
post_org_import,
list_policies,
list_policies_token,
get_dummy_master_password_policy,
get_master_password_policy,
get_policy,
put_policy,
@@ -101,7 +99,6 @@ pub fn routes() -> Vec<Route> {
get_billing_metadata,
get_billing_warnings,
get_auto_enroll_status,
get_self_host_billing_metadata,
]
}
@@ -356,7 +353,7 @@ async fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
// The returned `Id` will then be passed to `get_master_password_policy` which will mainly ignore it
#[get("/organizations/<identifier>/auto-enroll-status")]
async fn get_auto_enroll_status(identifier: &str, headers: Headers, conn: DbConn) -> JsonResult {
let org = if identifier == FAKE_SSO_IDENTIFIER {
let org = if identifier == crate::sso::FAKE_IDENTIFIER {
match Membership::find_main_user_org(&headers.user.uuid, &conn).await {
Some(member) => Organization::find_by_uuid(&member.org_uuid, &conn).await,
None => None,
@@ -366,7 +363,7 @@ async fn get_auto_enroll_status(identifier: &str, headers: Headers, conn: DbConn
};
let (id, identifier, rp_auto_enroll) = match org {
None => (identifier.to_string(), identifier.to_string(), false),
None => (get_uuid(), identifier.to_string(), false),
Some(org) => (
org.uuid.to_string(),
org.uuid.to_string(),
@@ -907,21 +904,36 @@ async fn _get_org_details(
Ok(json!(ciphers_json))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct OrgDomainDetails {
email: String,
}
// Returning a Domain/Organization here allow to prefill it and prevent prompting the user
// So we return a dummy value, since we only support a single SSO integration, and do not use the response anywhere
// So we either return an Org name associated to the user or a dummy value.
// In use since `v2025.6.0`, appears to use only the first `organizationIdentifier`
#[post("/organizations/domain/sso/verified")]
fn get_org_domain_sso_verified() -> JsonResult {
// Always return a dummy value, no matter if SSO is enabled or not
#[post("/organizations/domain/sso/verified", data = "<data>")]
async fn get_org_domain_sso_verified(data: Json<OrgDomainDetails>, conn: DbConn) -> JsonResult {
let data: OrgDomainDetails = data.into_inner();
let identifiers = match Organization::find_org_user_email(&data.email, &conn)
.await
.into_iter()
.map(|o| (o.name, o.uuid.to_string()))
.collect::<Vec<(String, String)>>()
{
v if !v.is_empty() => v,
_ => vec![(crate::sso::FAKE_IDENTIFIER.to_string(), crate::sso::FAKE_IDENTIFIER.to_string())],
};
Ok(Json(json!({
"object": "list",
"data": [{
"organizationIdentifier": FAKE_SSO_IDENTIFIER,
// These appear to be unused
"organizationName": FAKE_SSO_IDENTIFIER,
"domainName": CONFIG.domain()
}],
"continuationToken": null
"data": identifiers.into_iter().map(|(name, identifier)| json!({
"organizationName": name, // appear unused
"organizationIdentifier": identifier,
"domainName": CONFIG.domain(), // appear unused
})).collect::<Vec<Value>>()
})))
}
@@ -1448,7 +1460,7 @@ async fn _confirm_invite(
let save_result = member_to_confirm.save(conn).await;
if let Some(user) = User::find_by_uuid(&member_to_confirm.user_uuid, conn).await {
nt.send_user_update(UpdateType::SyncOrgKeys, &user, headers.device.push_uuid.as_ref(), conn).await;
nt.send_user_update(UpdateType::SyncOrgKeys, &user, &headers.device.push_uuid, conn).await;
}
save_result
@@ -1706,7 +1718,7 @@ async fn _delete_member(
.await;
if let Some(user) = User::find_by_uuid(&member_to_delete.user_uuid, conn).await {
nt.send_user_update(UpdateType::SyncOrgKeys, &user, headers.device.push_uuid.as_ref(), conn).await;
nt.send_user_update(UpdateType::SyncOrgKeys, &user, &headers.device.push_uuid, conn).await;
}
member_to_delete.delete(conn).await
@@ -1963,19 +1975,9 @@ async fn list_policies_token(org_id: OrganizationId, token: &str, conn: DbConn)
})))
}
// Called during the SSO enrollment return the default policy
#[get("/organizations/00000000-01DC-01DC-01DC-000000000000/policies/master-password", rank = 1)]
fn get_dummy_master_password_policy() -> JsonResult {
let (enabled, data) = match CONFIG.sso_master_password_policy_value() {
Some(policy) if CONFIG.sso_enabled() => (true, policy.to_string()),
_ => (false, "null".to_string()),
};
let policy = OrgPolicy::new(FAKE_SSO_IDENTIFIER.into(), OrgPolicyType::MasterPassword, enabled, data);
Ok(Json(policy.to_json()))
}
// Called during the SSO enrollment return the org policy if it exists
#[get("/organizations/<org_id>/policies/master-password", rank = 2)]
// Called during the SSO enrollment.
// Return the org policy if it exists, otherwise use the default one.
#[get("/organizations/<org_id>/policies/master-password", rank = 1)]
async fn get_master_password_policy(org_id: OrganizationId, _headers: OrgMemberHeaders, conn: DbConn) -> JsonResult {
let policy =
OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::MasterPassword, &conn).await.unwrap_or_else(|| {
@@ -1990,7 +1992,7 @@ async fn get_master_password_policy(org_id: OrganizationId, _headers: OrgMemberH
Ok(Json(policy.to_json()))
}
#[get("/organizations/<org_id>/policies/<pol_type>", rank = 3)]
#[get("/organizations/<org_id>/policies/<pol_type>", rank = 2)]
async fn get_policy(org_id: OrganizationId, pol_type: i32, headers: AdminHeaders, conn: DbConn) -> JsonResult {
if org_id != headers.org_id {
err!("Organization not found", "Organization id's do not match");
@@ -2199,15 +2201,6 @@ fn get_billing_warnings(_org_id: OrganizationId, _headers: OrgMemberHeaders) ->
}))
}
#[get("/organizations/<_org_id>/billing/vnext/self-host/metadata")]
fn get_self_host_billing_metadata(_org_id: OrganizationId, _headers: OrgMemberHeaders) -> Json<Value> {
// Prevent a 404 error, which also causes Javascript errors.
Json(json!({
"isOnSecretsManagerStandalone": false, // Secrets Manager is not supported by Vaultwarden
"organizationOccupiedSeats": 0 // Vaultwarden does not count seats
}))
}
fn _empty_data_json() -> Value {
json!({
"object": "list",
@@ -3034,7 +3027,10 @@ async fn put_reset_password_enrollment(
err!("User to enroll isn't member of required organization", "The user_id and acting user do not match");
}
let mut membership = headers.membership;
let Some(mut membership) = Membership::find_confirmed_by_user_and_org(&headers.user.uuid, &org_id, &conn).await
else {
err!("User to enroll isn't member of required organization")
};
check_reset_password_applicable(&org_id, &conn).await?;
+1 -1
@@ -574,7 +574,7 @@ async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Re
Ok(format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host))
} else {
Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_mins(5)).await?.uri().to_string())
Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}
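Worth noting: the two expressions in this hunk are equivalent in value; `Duration::from_mins` is simply the newer std constructor, and its use pairs with the `duration_suboptimal_units` clippy lint that the revert removes from Cargo.toml above. A small sketch, assuming a toolchain recent enough to provide `Duration::from_mins`:

    use std::time::Duration;

    // Same five-minute presign window either way.
    assert_eq!(Duration::from_secs(5 * 60), Duration::from_mins(5));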
+1 -1
@@ -38,7 +38,7 @@ static WEBAUTHN: LazyLock<Webauthn> = LazyLock::new(|| {
let webauthn = WebauthnBuilder::new(&rp_id, &rp_origin)
.expect("Creating WebauthnBuilder failed")
.rp_name(&domain)
.timeout(Duration::from_mins(1));
.timeout(Duration::from_millis(60000));
webauthn.build().expect("Building Webauthn failed")
});
+73 -53
@@ -19,7 +19,7 @@ use svg_hush::{data_url_filter, Filter};
use crate::{
config::PathType,
error::Error,
http_client::{get_reqwest_client_builder, get_valid_host, should_block_host, CustomHttpClientError},
http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
util::Cached,
CONFIG,
};
@@ -81,19 +81,19 @@ static ICON_SIZE_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(?x)(\d+
// The function name `icon_external` is checked in the `on_response` function in `AppHeaders`
// It is used to prevent sending a specific header which breaks icon downloads.
// If this function needs to be renamed, also adjust the code in `util.rs`
#[get("/<host>/icon.png")]
fn icon_external(host: &str) -> Cached<Option<Redirect>> {
let Ok(host) = get_valid_host(host) else {
warn!("Invalid host: {host}");
return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
};
if should_block_host(&host).is_err() {
warn!("Blocked address: {host}");
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Cached<Option<Redirect>> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {domain}");
return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
}
let url = CONFIG._icon_service_url().replace("{}", &host.to_string());
if should_block_address(domain) {
warn!("Blocked address: {domain}");
return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
}
let url = CONFIG._icon_service_url().replace("{}", domain);
let redir = match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
@@ -107,21 +107,12 @@ fn icon_external(host: &str) -> Cached<Option<Redirect>> {
Cached::ttl(redir, CONFIG.icon_cache_ttl(), true)
}
#[get("/<host>/icon.png")]
async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
let Ok(host) = get_valid_host(host) else {
warn!("Invalid host: {host}");
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
);
};
if should_block_host(&host).is_err() {
warn!("Blocked address: {host}");
if !is_valid_domain(domain) {
warn!("Invalid domain: {domain}");
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
@@ -129,7 +120,16 @@ async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
);
}
match get_icon(&host.to_string()).await {
if should_block_address(domain) {
warn!("Blocked address: {domain}");
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
);
}
match get_icon(domain).await {
Some((icon, icon_type)) => {
Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
}
@@ -137,6 +137,42 @@ async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
}
}
/// Returns if the domain provided is valid or not.
///
/// This does some manual checks and makes use of Url to do some basic checking.
/// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool {
const ALLOWED_CHARS: &str = "-.";
// If parsing the domain fails using Url, it will not work with reqwest.
if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
debug!("Domain parse error: '{domain}' - {parse_error:?}");
return false;
} else if domain.is_empty()
|| domain.contains("..")
|| domain.starts_with('.')
|| domain.starts_with('-')
|| domain.ends_with('-')
{
debug!(
"Domain validation error: '{domain}' is either empty, contains '..', starts with an '.', starts or ends with a '-'"
);
return false;
} else if domain.len() > 255 {
debug!("Domain validation error: '{domain}' exceeds 255 characters");
return false;
}
for c in domain.chars() {
if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
debug!("Domain validation error: '{domain}' contains an invalid character '{c}'");
return false;
}
}
true
}
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
let path = format!("{domain}.png");
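A few illustrative checks against the validation rules restored above (hypothetical calls, not part of the diff):

    // Accepted: alphanumerics plus '-' and '.'.
    assert!(is_valid_domain("example.com"));
    // Rejected: consecutive dots, a leading '.' or '-', a trailing '-',
    // empty input, or anything longer than 255 characters.
    assert!(!is_valid_domain("bad..example"));
    assert!(!is_valid_domain("-example.com"));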
@@ -331,7 +367,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if get_valid_host(&base_domain).is_ok() {
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
@@ -342,7 +378,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
// When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if get_valid_host(&www_domain).is_ok() {
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
@@ -496,8 +532,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
use data_url::DataUrl;
let mut icons = icon_result.iconlist.iter().take(5).peekable();
while let Some(icon) = icons.next() {
for icon in icon_result.iconlist.iter().take(5) {
if icon.href.starts_with("data:image") {
let Ok(datauri) = DataUrl::process(&icon.href) else {
continue;
@@ -525,23 +560,11 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
debug!("Trying {}", icon.href);
// Make sure all icons are checked before returning error
let res = match get_page_with_referer(&icon.href, &icon_result.referer).await {
Ok(r) => r,
Err(e) if icons.peek().is_none() => return Err(e),
Err(e) if CustomHttpClientError::downcast_ref(&e).is_some() => return Err(e), // If blacklisted stop immediately instead of checking the rest of the icons. see explanation and actual handling inside get_icon()
Err(e) => {
warn!("Unable to download icon: {e:?}");
// Continue to next icon
continue;
}
};
let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
// Check if the icon type is allowed, else try another icon from the list.
// Check if the icon type is allowed, else try an icon from the list.
icon_type = get_icon_type(&buffer);
if icon_type.is_none() {
buffer.clear();
@@ -595,17 +618,14 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
None
}
// Some details can be found here:
// - https://www.garykessler.net/library/file_sigs_GCK_latest.html
// - https://en.wikipedia.org/wiki/List_of_file_signatures
match bytes {
[137, 80, 78, 71, 13, 10, 26, 10, ..] => Some("png"),
[0, 0, 1, 0, n1, n2, ..] if u16::from_le_bytes([*n1, *n2]) > 0 => Some("x-icon"), // https://en.wikipedia.org/wiki/ICO_(file_format)
[82, 73, 70, 70, _, _, _, _, 87, 69, 66, 80, ..] => Some("webp"), // Only match WebP Images
[255, 216, 255, b, ..] if *b >= 0xC0 => Some("jpeg"),
[71, 73, 70, 56, 55 | 57, 97, ..] => Some("gif"),
[66, 77, _, _, _, _, 0, 0, 0, 0, ..] => Some("bmp"), // https://en.wikipedia.org/wiki/BMP_file_format
[60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
[137, 80, 78, 71, ..] => Some("png"),
[0, 0, 1, 0, ..] => Some("x-icon"),
[82, 73, 70, 70, ..] => Some("webp"),
[255, 216, 255, ..] => Some("jpeg"),
[71, 73, 70, 56, ..] => Some("gif"),
[66, 77, ..] => Some("bmp"),
[60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
[60, 63, 120, 109, 108, ..] => check_svg_after_xml_declaration(bytes), // An svg starting with <?xml
_ => None,
}
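The signature sniffing above boils down to slice pattern matching on leading magic bytes; a self-contained sketch of the same idea (not the repository's function):

    // Minimal standalone version of the byte-signature dispatch.
    fn sniff(bytes: &[u8]) -> Option<&'static str> {
        match bytes {
            [137, 80, 78, 71, ..] => Some("png"),       // \x89PNG
            [60, 115, 118, 103, ..] => Some("svg+xml"), // "<svg"
            _ => None,
        }
    }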
+41 -115
@@ -2,7 +2,7 @@ use chrono::Utc;
use num_traits::FromPrimitive;
use rocket::{
form::{Form, FromForm},
http::{Cookie, CookieJar, SameSite},
http::Status,
response::Redirect,
serde::json::Json,
Route,
@@ -12,7 +12,7 @@ use serde_json::Value;
use crate::{
api::{
core::{
accounts::{_prelogin, _register, kdf_upgrade, PreloginData, RegisterData},
accounts::{PreloginData, RegisterData, _prelogin, _register, kdf_upgrade},
log_user_event,
two_factor::{
authenticator, duo, duo_oidc, email, enforce_2fa_policy, is_twofactor_provider_usable, webauthn,
@@ -24,8 +24,7 @@ use crate::{
ApiResult, EmptyResult, JsonResult,
},
auth,
auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion, Secure},
crypto,
auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion},
db::{
models::{
AuthRequest, AuthRequestId, Device, DeviceId, EventType, Invitation, OIDCCodeWrapper, OrganizationApiKey,
@@ -43,7 +42,6 @@ pub fn routes() -> Vec<Route> {
routes![
login,
prelogin,
prelogin_password,
identity_register,
register_verification_email,
register_finish,
@@ -67,43 +65,43 @@ async fn login(
let login_result = match data.grant_type.as_ref() {
"refresh_token" => {
_check_is_some(data.refresh_token.as_ref(), "refresh_token cannot be blank")?;
_check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
_refresh_login(data, &conn, &client_header.ip).await
}
"password" if CONFIG.sso_enabled() && CONFIG.sso_only() => err!("SSO sign-in is required"),
"password" => {
_check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
_check_is_some(data.password.as_ref(), "password cannot be blank")?;
_check_is_some(data.scope.as_ref(), "scope cannot be blank")?;
_check_is_some(data.username.as_ref(), "username cannot be blank")?;
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.password, "password cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
_check_is_some(&data.username, "username cannot be blank")?;
_check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
_check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
_check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_password_login(data, &mut user_id, &conn, &client_header.ip, client_version.as_ref()).await
_password_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await
}
"client_credentials" => {
_check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
_check_is_some(data.client_secret.as_ref(), "client_secret cannot be blank")?;
_check_is_some(data.scope.as_ref(), "scope cannot be blank")?;
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.client_secret, "client_secret cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
_check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
_check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
_check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_api_key_login(data, &mut user_id, &conn, &client_header.ip).await
}
"authorization_code" if CONFIG.sso_enabled() => {
_check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
_check_is_some(data.code.as_ref(), "code cannot be blank")?;
_check_is_some(data.code_verifier.as_ref(), "code verifier cannot be blank")?;
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.code, "code cannot be blank")?;
_check_is_some(&data.code_verifier, "code verifier cannot be blank")?;
_check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
_check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
_check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_sso_login(data, &mut user_id, &conn, &client_header.ip, client_version.as_ref()).await
_sso_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await
}
"authorization_code" => err!("SSO sign-in is not available"),
t => err!("Invalid type", t),
@@ -133,14 +131,12 @@ async fn login(
login_result
}
// Return Status::Unauthorized to trigger logout
async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> JsonResult {
// When a refresh token is invalid or missing, we need to respond with an HTTP BadRequest (400)
// The response also needs to be a JSON body which holds at least a key `error` with the value `invalid_grant`
// See the link below for details
// https://github.com/bitwarden/clients/blob/2ee158e720a5e7dbe3641caf80b569e97a1dd91b/libs/common/src/services/api.service.ts#L1786-L1797
let Some(refresh_token) = data.refresh_token else {
err_json!(json!({"error": "invalid_grant"}), "Missing refresh_token")
// Extract token
let refresh_token = match data.refresh_token {
Some(token) => token,
None => err_code!("Missing refresh_token", Status::Unauthorized.code),
};
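A hypothetical test sketch of the contract described in the comments above (the route path and client setup are assumptions, not part of this diff):

```rust
// A missing refresh_token must come back as HTTP 400 with a JSON body
// carrying at least {"error": "invalid_grant"}, which the official
// clients treat as a signal to log the user out.
let resp = client
    .post("/identity/connect/token") // assumed mount point
    .form(&[("grant_type", "refresh_token")])
    .send()
    .await?;
assert_eq!(resp.status(), 400);
assert_eq!(resp.json::<serde_json::Value>().await?["error"], "invalid_grant");
```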
// ---
@@ -151,10 +147,7 @@ async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> Json
// let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
match auth::refresh_tokens(ip, &refresh_token, data.client_id, conn).await {
Err(err) => {
err_json!(
json!({"error": "invalid_grant"}),
format!("Unable to refresh login credentials: {}", err.message())
)
err_code!(format!("Unable to refresh login credentials: {}", err.message()), Status::Unauthorized.code)
}
Ok((mut device, auth_tokens)) => {
// Save to update `device.updated_at` to track usage and toggle the new-device status
@@ -179,7 +172,7 @@ async fn _sso_login(
user_id: &mut Option<UserId>,
conn: &DbConn,
ip: &ClientIp,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
) -> JsonResult {
AuthMethod::Sso.check_scope(data.scope.as_ref())?;
@@ -230,33 +223,7 @@ async fn _sso_login(
}
)
}
Some((user, None)) => match user_infos.email_verified {
None if !CONFIG.sso_allow_unknown_email_verification() => {
error!(
"Login failure ({}), existing non SSO user ({}) with same email ({}) and email verification status is unknown",
user_infos.identifier, user.uuid, user.email
);
err_silent!(
"Email verification status is unknown",
ErrorEvent {
event: EventType::UserFailedLogIn
}
)
}
Some(false) => {
error!(
"Login failure ({}), existing non SSO user ({}) with same email ({}) and email is not verified",
user_infos.identifier, user.uuid, user.email
);
err_silent!(
"Email is not verified by the SSO provider",
ErrorEvent {
event: EventType::UserFailedLogIn
}
)
}
_ => Some((user, None)),
},
Some((user, None)) => Some((user, None)),
},
Some((user, sso_user)) => Some((user, Some(sso_user))),
};
@@ -348,7 +315,7 @@ async fn _password_login(
user_id: &mut Option<UserId>,
conn: &DbConn,
ip: &ClientIp,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
) -> JsonResult {
// Validate scope
AuthMethod::Password.check_scope(data.scope.as_ref())?;
@@ -762,7 +729,7 @@ async fn twofactor_auth(
data: &ConnectData,
device: &mut Device,
ip: &ClientIp,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
conn: &DbConn,
) -> ApiResult<Option<String>> {
let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
@@ -787,10 +754,7 @@ async fn twofactor_auth(
}
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
// Ignore Remember and RecoveryCode types during this check; these are special
if ![TwoFactorType::Remember as i32, TwoFactorType::RecoveryCode as i32].contains(&selected_id)
&& !twofactor_ids.contains(&selected_id)
{
if !twofactor_ids.contains(&selected_id) {
err_json!(
_json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
"Invalid two factor provider"
@@ -907,7 +871,7 @@ async fn _json_err_twofactor(
providers: &[i32],
user_id: &UserId,
data: &ConnectData,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
conn: &DbConn,
) -> ApiResult<Value> {
let mut result = json!({
@@ -1011,11 +975,6 @@ async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
#[post("/accounts/prelogin/password", data = "<data>")]
async fn prelogin_password(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
#[post("/accounts/register", data = "<data>")]
async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
_register(data, false, conn).await
@@ -1142,7 +1101,7 @@ struct ConnectData {
#[field(name = uncased("code_verifier"))]
code_verifier: Option<OIDCCodeVerifier>,
}
fn _check_is_some<T>(value: Option<&T>, msg: &str) -> EmptyResult {
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
if value.is_none() {
err!(msg)
}
@@ -1161,16 +1120,13 @@ fn prevalidate() -> JsonResult {
}
}
const SSO_BINDING_COOKIE: &str = "VW_SSO_BINDING";
#[get("/connect/oidc-signin?<code>&<state>", rank = 1)]
async fn oidcsignin(code: OIDCCode, state: String, cookies: &CookieJar<'_>, mut conn: DbConn) -> ApiResult<Redirect> {
async fn oidcsignin(code: OIDCCode, state: String, mut conn: DbConn) -> ApiResult<Redirect> {
_oidcsignin_redirect(
state,
OIDCCodeWrapper::Ok {
code,
},
cookies,
&mut conn,
)
.await
@@ -1183,7 +1139,6 @@ async fn oidcsignin_error(
state: String,
error: String,
error_description: Option<String>,
cookies: &CookieJar<'_>,
mut conn: DbConn,
) -> ApiResult<Redirect> {
_oidcsignin_redirect(
@@ -1192,7 +1147,6 @@ async fn oidcsignin_error(
error,
error_description,
},
cookies,
&mut conn,
)
.await
@@ -1204,7 +1158,6 @@ async fn oidcsignin_error(
async fn _oidcsignin_redirect(
base64_state: String,
code_response: OIDCCodeWrapper,
cookies: &CookieJar<'_>,
conn: &mut DbConn,
) -> ApiResult<Redirect> {
let state = sso::decode_state(&base64_state)?;
@@ -1213,17 +1166,6 @@ async fn _oidcsignin_redirect(
None => err!(format!("Cannot retrieve sso_auth for {state}")),
Some(sso_auth) => sso_auth,
};
// Browser-binding check
// The cookie was set on /connect/authorize and must come from the same browser that initiated the flow.
let cookie_value = cookies.get(SSO_BINDING_COOKIE).map(|c| c.value().to_string());
let provided_hash = cookie_value.as_deref().map(|v| crypto::sha256_hex(v.as_bytes()));
match (sso_auth.binding_hash.as_deref(), provided_hash.as_deref()) {
(Some(expected), Some(actual)) if crypto::ct_eq(expected, actual) => {}
_ => err!(format!("SSO session binding mismatch for {state}")),
}
cookies.remove(Cookie::build(SSO_BINDING_COOKIE).path("/identity/connect/").build());
sso_auth.code_response = Some(code_response);
sso_auth.updated_at = Utc::now().naive_utc();
sso_auth.save(conn).await?;
@@ -1270,7 +1212,7 @@ struct AuthorizeData {
// The `redirect_uri` will change depending on the client (web, android, ios ..)
#[get("/connect/authorize?<data..>")]
async fn authorize(data: AuthorizeData, cookies: &CookieJar<'_>, secure: Secure, conn: DbConn) -> ApiResult<Redirect> {
async fn authorize(data: AuthorizeData, conn: DbConn) -> ApiResult<Redirect> {
let AuthorizeData {
client_id,
redirect_uri,
@@ -1284,23 +1226,7 @@ async fn authorize(data: AuthorizeData, cookies: &CookieJar<'_>, secure: Secure,
err!("Unsupported code challenge method");
}
// Generate a browser-binding token. Only its hash is stored in the DB; the raw value is handed to the browser as a cookie.
// Validated on /connect/oidc-signin
let binding_token = data_encoding::BASE64URL_NOPAD.encode(&crypto::get_random_bytes::<32>());
let binding_hash = crypto::sha256_hex(binding_token.as_bytes());
let auth_url =
sso::authorize_url(state, code_challenge, &client_id, &redirect_uri, Some(binding_hash), conn).await?;
cookies.add(
Cookie::build((SSO_BINDING_COOKIE, binding_token))
.path("/identity/connect/")
.max_age(time::Duration::seconds(sso::SSO_AUTH_EXPIRATION.num_seconds()))
.same_site(SameSite::Lax) // Lax is needed because the IdP runs on a different FQDN
.http_only(true)
.secure(secure.https)
.build(),
);
let auth_url = sso::authorize_url(state, code_challenge, &client_id, &redirect_uri, conn).await?;
Ok(Redirect::temporary(String::from(auth_url)))
}
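A condensed sketch of the browser-binding scheme this hunk removes (all helpers appear elsewhere in this diff):

```rust
// Generate: random 32-byte token, base64url-encoded; the browser gets the
// raw value as a cookie, the DB only ever sees the SHA-256 hex digest.
let binding_token = data_encoding::BASE64URL_NOPAD.encode(&crypto::get_random_bytes::<32>());
let stored_hash = crypto::sha256_hex(binding_token.as_bytes());

// Verify (on /connect/oidc-signin): hash the presented cookie value and
// compare in constant time against the stored digest.
let presented_hash = crypto::sha256_hex(binding_token.as_bytes());
assert!(crypto::ct_eq(&stored_hash, &presented_hash));
```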
+1 -1
View File
@@ -338,7 +338,7 @@ impl WebSocketUsers {
}
// NOTE: The last modified date needs to be updated before calling these methods
pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: Option<&PushId>, conn: &DbConn) {
pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &DbConn) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
+2 -2
View File
@@ -135,7 +135,7 @@ pub async fn register_push_device(device: &mut Device, conn: &DbConn) -> EmptyRe
Ok(())
}
pub async fn unregister_push_device(push_id: Option<&PushId>) -> EmptyResult {
pub async fn unregister_push_device(push_id: &Option<PushId>) -> EmptyResult {
if !CONFIG.push_enabled() || push_id.is_none() {
return Ok(());
}
@@ -206,7 +206,7 @@ pub async fn push_logout(user: &User, acting_device: Option<&Device>, conn: &DbC
}
}
pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: Option<&PushId>, conn: &DbConn) {
pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &DbConn) {
if Device::check_user_has_push_device(&user.uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,
+3 -3
View File
@@ -1076,7 +1076,7 @@ fn validate_config(cfg: &ConfigItems, on_update: bool) -> Result<(), Error> {
validate_internal_sso_issuer_url(&cfg.sso_authority)?;
validate_internal_sso_redirect_url(&cfg.sso_callback_path)?;
validate_sso_master_password_policy(cfg.sso_master_password_policy.as_ref())?;
validate_sso_master_password_policy(&cfg.sso_master_password_policy)?;
}
if cfg._enable_yubico {
@@ -1271,7 +1271,7 @@ fn validate_internal_sso_redirect_url(sso_callback_path: &String) -> Result<open
}
fn validate_sso_master_password_policy(
sso_master_password_policy: Option<&String>,
sso_master_password_policy: &Option<String>,
) -> Result<Option<serde_json::Value>, Error> {
let policy = sso_master_password_policy.as_ref().map(|mpp| serde_json::from_str::<serde_json::Value>(mpp));
@@ -1725,7 +1725,7 @@ impl Config {
}
pub fn sso_master_password_policy_value(&self) -> Option<serde_json::Value> {
validate_sso_master_password_policy(self.sso_master_password_policy().as_ref()).ok().flatten()
validate_sso_master_password_policy(&self.sso_master_password_policy()).ok().flatten()
}
pub fn sso_scopes_vec(&self) -> Vec<String> {
-7
View File
@@ -113,10 +113,3 @@ pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
use subtle::ConstantTimeEq;
a.as_ref().ct_eq(b.as_ref()).into()
}
//
// SHA256
//
pub fn sha256_hex(data: &[u8]) -> String {
HEXLOWER.encode(digest::digest(&digest::SHA256, data).as_ref())
}
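A usage note for the removed helper, sketched under the definitions above:

```rust
// SHA-256 yields 32 bytes, so the lowercase hex form is always 64
// characters; equal-length inputs keep the ct_eq comparison uniform.
let h = sha256_hex(b"example");
assert_eq!(h.len(), 64);
assert!(ct_eq(&h, &h));
```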
-91
View File
@@ -1,91 +0,0 @@
use chrono::NaiveDateTime;
use diesel::prelude::*;
use super::{CipherId, User, UserId};
use crate::api::EmptyResult;
use crate::db::schema::archives;
use crate::db::DbConn;
use crate::error::MapResult;
#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = archives)]
#[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Archive {
pub user_uuid: UserId,
pub cipher_uuid: CipherId,
pub archived_at: NaiveDateTime,
}
impl Archive {
// Returns the date the specified cipher was archived
pub async fn get_archived_at(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
db_run! { conn: {
archives::table
.filter(archives::cipher_uuid.eq(cipher_uuid))
.filter(archives::user_uuid.eq(user_uuid))
.select(archives::archived_at)
.first::<NaiveDateTime>(conn).ok()
}}
}
// Saves (inserts or updates) an archive record with the provided timestamp
pub async fn save(
user_uuid: &UserId,
cipher_uuid: &CipherId,
archived_at: NaiveDateTime,
conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn:
sqlite, mysql {
diesel::replace_into(archives::table)
.values((
archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(archived_at),
))
.execute(conn)
.map_res("Error saving archive")
}
postgresql {
diesel::insert_into(archives::table)
.values((
archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(archived_at),
))
.on_conflict((archives::user_uuid, archives::cipher_uuid))
.do_update()
.set(archives::archived_at.eq(archived_at))
.execute(conn)
.map_res("Error saving archive")
}
}
}
// Deletes an archive record for a specific cipher
pub async fn delete_by_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn: {
diesel::delete(
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.filter(archives::cipher_uuid.eq(cipher_uuid))
)
.execute(conn)
.map_res("Error deleting archive")
}}
}
/// Return a vec with (cipher_uuid, archived_at)
/// This is used during a full sync so we only need one query for all archive matches
pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> {
db_run! { conn: {
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.select((archives::cipher_uuid, archives::archived_at))
.load::<(CipherId, NaiveDateTime)>(conn)
.unwrap_or_default()
}}
}
}
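A hypothetical sync-side usage of the removed model, assuming the types above; `find_by_user` exists so that a full sync can resolve archive dates with a single query:

```rust
use std::collections::HashMap;

// Collect all (cipher, archived_at) pairs once, then each cipher's JSON
// serialization does an O(1) lookup instead of a per-cipher query.
async fn collect_archived_dates(user_uuid: &UserId, conn: &DbConn) -> HashMap<CipherId, NaiveDateTime> {
    Archive::find_by_user(user_uuid, conn).await.into_iter().collect()
}
```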
+1 -1
View File
@@ -50,7 +50,7 @@ impl Attachment {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
} else {
Ok(operator.presign_read(&self.get_file_path(), Duration::from_mins(5)).await?.uri().to_string())
Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}
+3 -20
View File
@@ -10,8 +10,8 @@ use diesel::prelude::*;
use serde_json::Value;
use super::{
Archive, Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership,
MembershipStatus, MembershipType, OrganizationId, User, UserId,
Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
MembershipType, OrganizationId, User, UserId,
};
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
use macros::UuidFromParam;
@@ -380,11 +380,6 @@ impl Cipher {
} else {
self.is_favorite(user_uuid, conn).await
});
json_object["archivedDate"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_archives.get(&self.uuid).map_or(Value::Null, |d| Value::String(format_date(d)))
} else {
self.get_archived_at(user_uuid, conn).await.map_or(Value::Null, |d| Value::String(format_date(&d)))
});
// These values are true by default, but can be false if the
// cipher belongs to a collection or group where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user.
@@ -403,7 +398,7 @@ impl Cipher {
3 => "card",
4 => "identity",
5 => "sshKey",
_ => err!(format!("Cipher {} has an invalid type {}", self.uuid, self.atype)),
_ => panic!("Wrong type"),
};
json_object[key] = type_data_json;
@@ -747,18 +742,6 @@ impl Cipher {
}
}
pub async fn get_archived_at(&self, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
Archive::get_archived_at(&self.uuid, user_uuid, conn).await
}
pub async fn set_archived_at(&self, archived_at: NaiveDateTime, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
Archive::save(user_uuid, &self.uuid, archived_at, conn).await
}
pub async fn unarchive(&self, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
Archive::delete_by_cipher(user_uuid, &self.uuid, conn).await
}
pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> {
db_run! { conn: {
folders_ciphers::table
+1 -4
View File
@@ -25,7 +25,7 @@ pub struct Device {
pub user_uuid: UserId,
pub name: String,
pub atype: i32, // https://github.com/bitwarden/server/blob/8d547dcc280babab70dd4a3c94ced6a34b12dfbf/src/Core/Enums/DeviceType.cs
pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
pub push_uuid: Option<PushId>,
pub push_token: Option<String>,
@@ -332,8 +332,6 @@ pub enum DeviceType {
MacOsCLI = 24,
#[display("Linux CLI")]
LinuxCLI = 25,
#[display("DuckDuckGo")]
DuckDuckGoBrowser = 26,
}
impl DeviceType {
@@ -365,7 +363,6 @@ impl DeviceType {
23 => DeviceType::WindowsCLI,
24 => DeviceType::MacOsCLI,
25 => DeviceType::LinuxCLI,
26 => DeviceType::DuckDuckGoBrowser,
_ => DeviceType::UnknownBrowser,
}
}
+3 -2
View File
@@ -85,8 +85,7 @@ impl EmergencyAccess {
pub async fn to_json_grantee_details(&self, conn: &DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid {
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else {
let email = self.email.as_deref()?;
} else if let Some(email) = self.email.as_deref() {
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
@@ -95,6 +94,8 @@ impl EmergencyAccess {
return None;
}
}
} else {
return None;
};
Some(json!({
-2
View File
@@ -1,4 +1,3 @@
mod archive;
mod attachment;
mod auth_request;
mod cipher;
@@ -18,7 +17,6 @@ mod two_factor_duo_context;
mod two_factor_incomplete;
mod user;
pub use self::archive::Archive;
pub use self::attachment::{Attachment, AttachmentId};
pub use self::auth_request::{AuthRequest, AuthRequestId};
pub use self::cipher::{Cipher, CipherId, RepromptType};
+1 -9
View File
@@ -54,18 +54,11 @@ pub struct SsoAuth {
pub auth_response: Option<OIDCAuthenticatedUser>,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub binding_hash: Option<String>,
}
/// Local methods
impl SsoAuth {
pub fn new(
state: OIDCState,
client_challenge: OIDCCodeChallenge,
nonce: String,
redirect_uri: String,
binding_hash: Option<String>,
) -> Self {
pub fn new(state: OIDCState, client_challenge: OIDCCodeChallenge, nonce: String, redirect_uri: String) -> Self {
let now = Utc::now().naive_utc();
SsoAuth {
@@ -77,7 +70,6 @@ impl SsoAuth {
updated_at: now,
code_response: None,
auth_response: None,
binding_hash,
}
}
}
-12
View File
@@ -265,7 +265,6 @@ table! {
auth_response -> Nullable<Text>,
created_at -> Timestamp,
updated_at -> Timestamp,
binding_hash -> Nullable<Text>,
}
}
@@ -342,16 +341,6 @@ table! {
}
}
table! {
archives (user_uuid, cipher_uuid) {
user_uuid -> Text,
cipher_uuid -> Text,
archived_at -> Timestamp,
}
}
joinable!(archives -> users (user_uuid));
joinable!(archives -> ciphers (cipher_uuid));
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@@ -383,7 +372,6 @@ joinable!(auth_requests -> users (user_uuid));
joinable!(sso_users -> users (user_uuid));
allow_tables_to_appear_in_same_query!(
archives,
attachments,
ciphers,
ciphers_collections,
+37 -290
View File
@@ -1,11 +1,12 @@
use std::{
fmt,
net::{IpAddr, SocketAddr},
str::FromStr,
sync::{Arc, LazyLock, Mutex},
time::Duration,
};
use hickory_resolver::{net::runtime::TokioRuntimeProvider, TokioResolver};
use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver};
use regex::Regex;
use reqwest::{
dns::{Name, Resolve, Resolving},
@@ -58,6 +59,16 @@ pub fn get_reqwest_client_builder() -> ClientBuilder {
.timeout(Duration::from_secs(10))
}
pub fn should_block_address(domain_or_ip: &str) -> bool {
if let Ok(ip) = IpAddr::from_str(domain_or_ip) {
if should_block_ip(ip) {
return true;
}
}
should_block_address_regex(domain_or_ip)
}
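Illustrative probes for the pre-revert helper above; outcomes depend on runtime configuration, so both assertions are assumptions:

```rust
// Assuming HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS is enabled: a literal IP is
// parsed and checked against the non-global rules before the regex runs.
assert!(should_block_address("169.254.169.254")); // link-local, AWS IMDS

// Assuming HTTP_REQUEST_BLOCK_REGEX matches "^internal\." :
assert!(should_block_address("internal.example.com"));
```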
fn should_block_ip(ip: IpAddr) -> bool {
if !CONFIG.http_request_block_non_global_ips() {
return false;
@@ -89,54 +100,11 @@ fn should_block_address_regex(domain_or_ip: &str) -> bool {
is_match
}
pub fn get_valid_host(host: &str) -> Result<Host, CustomHttpClientError> {
let Ok(host) = Host::parse(host) else {
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
};
// Some extra checks to validate hosts
match host {
Host::Domain(ref domain) => {
// Host::parse() does not verify length or all possible invalid characters
// We do some extra checks here to prevent issues
if domain.len() > 253 {
debug!("Domain validation error: '{domain}' exceeds 253 characters");
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
}
if !domain.split('.').all(|label| {
!label.is_empty()
// Labels can't be longer than 63 chars
&& label.len() <= 63
// Labels are not allowed to start or end with a hyphen `-`
&& !label.starts_with('-')
&& !label.ends_with('-')
// Only ASCII Alphanumeric characters are allowed
// We already received a punycoded domain back, so no unicode should exist here
&& label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
}) {
debug!(
"Domain validation error: '{domain}' labels contain invalid characters or exceed the maximum length"
);
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
}
}
Host::Ipv4(_) | Host::Ipv6(_) => {}
}
Ok(host)
}
pub fn should_block_host<S: AsRef<str>>(host: &Host<S>) -> Result<(), CustomHttpClientError> {
fn should_block_host(host: &Host<&str>) -> Result<(), CustomHttpClientError> {
let (ip, host_str): (Option<IpAddr>, String) = match host {
Host::Ipv4(ip) => (Some(IpAddr::V4(*ip)), ip.to_string()),
Host::Ipv6(ip) => (Some(IpAddr::V6(*ip)), ip.to_string()),
Host::Domain(d) => (None, d.as_ref().to_string()),
Host::Domain(d) => (None, (*d).to_string()),
};
if let Some(ip) = ip {
@@ -166,9 +134,6 @@ pub enum CustomHttpClientError {
domain: Option<String>,
ip: IpAddr,
},
Invalid {
domain: String,
},
}
impl CustomHttpClientError {
@@ -190,7 +155,7 @@ impl fmt::Display for CustomHttpClientError {
match self {
Self::Blocked {
domain,
} => write!(f, "Blocked domain: '{domain}' matched HTTP_REQUEST_BLOCK_REGEX"),
} => write!(f, "Blocked domain: {domain} matched HTTP_REQUEST_BLOCK_REGEX"),
Self::NonGlobalIp {
domain: Some(domain),
ip,
@@ -198,10 +163,7 @@ impl fmt::Display for CustomHttpClientError {
Self::NonGlobalIp {
domain: None,
ip,
} => write!(f, "IP '{ip}' is not a global IP!"),
Self::Invalid {
domain,
} => write!(f, "Invalid host: '{domain}' contains invalid characters or exceeds the maximum length"),
} => write!(f, "IP {ip} is not a global IP!"),
}
}
}
@@ -222,46 +184,40 @@ impl CustomDnsResolver {
}
fn new() -> Arc<Self> {
TokioResolver::builder(TokioRuntimeProvider::default())
.and_then(|mut builder| {
// Hickory's default since v0.26 is `Ipv6AndIpv4`, which sorts IPv6 first
// This might cause issues on IPv4-only systems or containers
// Unless someone enabled DNS_PREFER_IPV6, use Ipv4AndIpv6, which returns IPv4 first, as our previous default did
if !CONFIG.dns_prefer_ipv6() {
builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6;
match TokioResolver::builder(TokioConnectionProvider::default()) {
Ok(mut builder) => {
if CONFIG.dns_prefer_ipv6() {
builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4;
}
builder.build()
})
.inspect_err(|e| warn!("Error creating Hickory resolver, falling back to default: {e:?}"))
.map(|resolver| Arc::new(Self::Hickory(Arc::new(resolver))))
.unwrap_or_else(|_| Arc::new(Self::Default()))
let resolver = builder.build();
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
}
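A minimal sketch of the pre-revert resolver construction, using only the calls visible in this hunk:

```rust
// Build a Hickory resolver on the Tokio runtime provider and keep the
// historical IPv4-first ordering unless DNS_PREFER_IPV6 is set.
let mut builder = TokioResolver::builder(TokioRuntimeProvider::default())?;
if !CONFIG.dns_prefer_ipv6() {
    builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6;
}
let resolver = builder.build();
```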
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Vec<SocketAddr>, BoxError> {
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let results: Vec<SocketAddr> = match self {
Self::Default() => tokio::net::lookup_host((name, 0)).await?.collect(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().map(|i| SocketAddr::new(i, 0)).collect(),
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
for addr in &results {
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(results)
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomHttpClientError> {
let Ok(host) = get_valid_host(name) else {
return Err(CustomHttpClientError::Invalid {
domain: name.to_string(),
});
};
if should_block_host(&host).is_err() {
if should_block_address(name) {
return Err(CustomHttpClientError::Blocked {
domain: name.to_string(),
});
@@ -286,11 +242,8 @@ impl Resolve for CustomDnsResolver {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let results = this.resolve_domain(name).await?;
if results.is_empty() {
warn!("Unable to resolve {name} to any valid IP address");
}
Ok::<reqwest::dns::Addrs, _>(Box::new(results.into_iter()))
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
})
}
}
@@ -352,209 +305,3 @@ pub(crate) mod aws {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::util::is_global_hardcoded;
use std::net::Ipv4Addr;
use url::Host;
// ===
// IPv4 numeric-format normalization
fn parse_to_ip(s: &str) -> Option<IpAddr> {
match Host::parse(s).ok()? {
Host::Ipv4(v4) => Some(IpAddr::V4(v4)),
Host::Ipv6(v6) => Some(IpAddr::V6(v6)),
Host::Domain(_) => None,
}
}
#[test]
fn dotted_decimal_loopback_normalizes() {
let ip = parse_to_ip("127.0.0.1").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn single_decimal_loopback_normalizes() {
// 127.0.0.1 == 2130706433
let ip = parse_to_ip("2130706433").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn hex_loopback_normalizes() {
let ip = parse_to_ip("0x7f000001").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn dotted_hex_loopback_normalizes() {
let ip = parse_to_ip("0x7f.0.0.1").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn octal_loopback_normalizes() {
// 017700000001 == 127.0.0.1
let ip = parse_to_ip("017700000001").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn dotted_octal_loopback_normalizes() {
let ip = parse_to_ip("0177.0.0.01").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn aws_metadata_decimal_blocked() {
// 169.254.169.254 == 2852039166 (link-local, AWS IMDS)
let ip = parse_to_ip("2852039166").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(169, 254, 169, 254)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn rfc1918_hex_blocked() {
// 10.0.0.1
let ip = parse_to_ip("0x0a000001").unwrap();
assert!(!is_global_hardcoded(ip));
}
#[test]
fn public_ip_decimal_allowed() {
// 8.8.8.8 == 134744072
let ip = parse_to_ip("134744072").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)));
assert!(is_global_hardcoded(ip));
}
// ===
// get_valid_host integration: numeric forms become Host::Ipv4
#[test]
fn get_valid_host_normalizes_decimal_int() {
let h = get_valid_host("2130706433").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}
#[test]
fn get_valid_host_normalizes_hex() {
let h = get_valid_host("0x7f000001").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}
#[test]
fn get_valid_host_normalizes_octal() {
let h = get_valid_host("017700000001").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}
// ===
// IPv6 formats
#[test]
fn ipv6_loopback_blocked() {
let h = get_valid_host("[::1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}
#[test]
fn ipv4_mapped_in_ipv6_loopback_blocked() {
// ::ffff:127.0.0.1 — v4-mapped form; is_global_hardcoded blocks via ::ffff:0:0/96
let h = get_valid_host("[::ffff:127.0.0.1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}
#[test]
fn ipv6_unique_local_blocked() {
let h = get_valid_host("[fc00::1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}
// ===
// Punycode / IDN
#[test]
fn punycode_passthrough() {
let h = get_valid_host("xn--deadbeafcaf-lbb.test").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "xn--deadbeafcaf-lbb.test"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_unicode_gets_punycoded() {
let h = get_valid_host("deadbeafcafé.test").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "xn--deadbeafcaf-lbb.test"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_unicode_gets_punycoded_tld() {
let h = get_valid_host("deadbeaf.café").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "deadbeaf.xn--caf-dma"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_emoji_gets_punycoded() {
let h = get_valid_host("xn--t88h.test").expect("valid"); // 🛡️.test
match h {
Host::Domain(d) => assert_eq!(d, "xn--t88h.test"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_unicode_to_punycode_roundtrip() {
let from_unicode = get_valid_host("🛡️.test").expect("valid");
let from_puny = get_valid_host("xn--t88h.test").expect("valid");
match (from_unicode, from_puny) {
(Host::Domain(a), Host::Domain(b)) => assert_eq!(a, b),
_ => panic!("expected domains"),
}
}
#[test]
fn invalid_punycode_rejected() {
// bare invalid punycode
assert!(get_valid_host("xn--").is_err());
}
#[test]
fn underscore_in_label_rejected() {
assert!(get_valid_host("dead_beaf.cafe").is_err());
}
#[test]
fn label_too_long_rejected() {
let label = "a".repeat(64);
assert!(get_valid_host(&format!("{label}.test")).is_err());
}
#[test]
fn domain_too_long_rejected() {
let big = "a.".repeat(130) + "test"; // > 253
assert!(get_valid_host(&big).is_err());
}
}
+3 -4
View File
@@ -17,7 +17,7 @@ use crate::{
CONFIG,
};
pub static FAKE_SSO_IDENTIFIER: &str = "00000000-01DC-01DC-01DC-000000000000";
pub static FAKE_IDENTIFIER: &str = "VW_DUMMY_IDENTIFIER_FOR_OIDC";
static SSO_JWT_ISSUER: LazyLock<String> = LazyLock::new(|| format!("{}|sso", CONFIG.domain_origin()));
@@ -188,7 +188,6 @@ pub async fn authorize_url(
client_challenge: OIDCCodeChallenge,
client_id: &str,
raw_redirect_uri: &str,
binding_hash: Option<String>,
conn: DbConn,
) -> ApiResult<Url> {
let redirect_uri = match client_id {
@@ -204,7 +203,7 @@ pub async fn authorize_url(
_ => err!(format!("Unsupported client {client_id}")),
};
let (auth_url, sso_auth) = Client::authorize_url(state, client_challenge, redirect_uri, binding_hash).await?;
let (auth_url, sso_auth) = Client::authorize_url(state, client_challenge, redirect_uri).await?;
sso_auth.save(&conn).await?;
Ok(auth_url)
}
@@ -284,7 +283,7 @@ pub async fn exchange_code(
let email_verified = id_claims.email_verified().or(user_info.email_verified());
let user_name = id_claims.preferred_username().or(user_info.preferred_username()).map(|un| un.to_string());
let user_name = id_claims.preferred_username().map(|un| un.to_string());
let refresh_token = token_response.refresh_token().map(|t| t.secret());
if refresh_token.is_none() && CONFIG.sso_scopes_vec().contains(&"offline_access".to_string()) {
+1 -2
View File
@@ -117,7 +117,6 @@ impl Client {
state: OIDCState,
client_challenge: OIDCCodeChallenge,
redirect_uri: String,
binding_hash: Option<String>,
) -> ApiResult<(Url, SsoAuth)> {
let scopes = CONFIG.sso_scopes_vec().into_iter().map(Scope::new);
let base64_state = data_encoding::BASE64.encode(state.to_string().as_bytes());
@@ -140,7 +139,7 @@ impl Client {
}
let (auth_url, _, nonce) = auth_req.url();
Ok((auth_url, SsoAuth::new(state, client_challenge, nonce.secret().clone(), redirect_uri, binding_hash)))
Ok((auth_url, SsoAuth::new(state, client_challenge, nonce.secret().clone(), redirect_uri)))
}
pub async fn exchange_code(
+2 -13
View File
@@ -1,17 +1,6 @@
body {
padding-top: 75px;
}
/* Some extra widths for the main layout */
@media (min-width: 1600px) {
.container-xxl {
max-width: 1520px;
}
}
@media (min-width: 1800px) {
.container-xxl {
max-width: 1720px;
}
}
img {
width: 48px;
height: 48px;
@@ -49,8 +38,8 @@ img {
max-width: 130px;
}
#users-table .vw-actions, #orgs-table .vw-actions {
min-width: 170px;
max-width: 180px;
min-width: 155px;
max-width: 160px;
}
#users-table .vw-org-cell {
max-height: 120px;
+1 -1
View File
@@ -27,7 +27,7 @@
</symbol>
</svg>
<nav class="navbar navbar-expand-md navbar-dark bg-dark mb-4 shadow fixed-top">
<div class="container-xxl">
<div class="container-xl">
<a class="navbar-brand" href="{{urlpath}}/admin"><img class="vaultwarden-icon" src="{{urlpath}}/vw_static/vaultwarden-icon.png" alt="V">aultwarden Admin</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarCollapse"
aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">
+1 -1
View File
@@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="diagnostics-block" class="my-3 p-3 rounded shadow">
<h6 class="border-bottom pb-2 mb-2">Diagnostics</h6>
+1 -1
View File
@@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
{{#if error}}
<div class="align-items-center p-3 mb-3 text-opacity-50 text-dark bg-warning rounded shadow">
<div>
+1 -1
View File
@@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="organizations-block" class="my-3 p-3 rounded shadow">
<h6 class="border-bottom pb-2 mb-3">Organizations</h6>
<div class="table-responsive-xl small">
+1 -1
View File
@@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="admin_token_warning" class="alert alert-warning alert-dismissible fade show d-none">
<button type="button" class="btn-close" data-bs-target="admin_token_warning" data-bs-dismiss="alert" aria-label="Close"></button>
You are using a plain text `ADMIN_TOKEN` which is insecure.<br>
+2 -2
View File
@@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="users-block" class="my-3 p-3 rounded shadow">
<h6 class="border-bottom pb-2 mb-3">Registered Users</h6>
<div class="table-responsive-xl small">
@@ -43,7 +43,7 @@
</td>
{{#if ../sso_enabled}}
<td>
<span class="d-block text-break text-wrap">{{sso_identifier}}</span>
<span class="d-block">{{sso_identifier}}</span>
</td>
{{/if}}
<td>
@@ -137,14 +137,6 @@ bit-nav-logo bit-nav-item .bwi-shield {
app-user-layout app-danger-zone button:nth-child(1) {
@extend %vw-hide;
}
/* Hide unsupported Forwarding email alias options */
ng-dropdown-panel div.ng-dropdown-panel-items div:has(> [title="Firefox Relay"]) {
@extend %vw-hide;
}
ng-dropdown-panel div.ng-dropdown-panel-items div:has(> [title="DuckDuckGo"]) {
@extend %vw-hide;
}
/**** END Static Vaultwarden Changes ****/
/**** START Dynamic Vaultwarden Changes ****/
{{#if signup_disabled}}
+8 -18
View File
@@ -734,7 +734,7 @@ where
warn!("Can't connect to database, retrying: {e:?}");
sleep(Duration::from_secs(1)).await;
sleep(Duration::from_millis(1_000)).await;
}
}
}
@@ -818,18 +818,14 @@ pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
std::net::IpAddr::V4(ip) => {
!(ip.octets()[0] == 0 // "This network"
|| ip.is_private()
|| (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) // ip.is_shared()
|| (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
|| ip.is_loopback()
|| ip.is_link_local()
// addresses reserved for future protocols (`192.0.0.0/24`)
// .9 and .10 are documented as globally reachable so they're excluded
|| (
ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0
&& ip.octets()[3] != 9 && ip.octets()[3] != 10
)
||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|| ip.is_documentation()
|| (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) // ip.is_reserved()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
|| ip.is_broadcast())
}
std::net::IpAddr::V6(ip) => {
@@ -853,17 +849,11 @@ pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
// AS112-v6 (`2001:4:112::/48`)
|| matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
// ORCHIDv2 (`2001:20::/28`)
// Drone Remote ID Protocol Entity Tags (DETs) Prefix (`2001:30::/28`)
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x3F).contains(&b))
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
))
// 6to4 (`2002::/16`) is not explicitly documented as globally reachable;
// IANA says N/A.
|| matches!(ip.segments(), [0x2002, _, _, _, _, _, _, _])
|| matches!(ip.segments(), [0x2001, 0xdb8, ..] | [0x3fff, 0..=0x0fff, ..]) // ip.is_documentation()
// Segment Routing (SRv6) SIDs (`5f00::/16`)
|| matches!(ip.segments(), [0x5f00, ..])
|| ip.is_unique_local()
|| ip.is_unicast_link_local())
|| ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
|| ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
|| ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
}
}
}
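A worked example of the shared-address mask used in the IPv4 arm above (pure arithmetic, safe to verify in isolation):

```rust
// 0b1100_0000 == 192 keeps the top two bits of the second octet; a result
// of 0b0100_0000 == 64 means the octet lies in 64..=127, i.e. 100.64.0.0/10.
fn is_shared(octets: [u8; 4]) -> bool {
    octets[0] == 100 && (octets[1] & 0b1100_0000) == 0b0100_0000
}

assert!(is_shared([100, 64, 0, 1]));
assert!(is_shared([100, 127, 255, 255]));
assert!(!is_shared([100, 128, 0, 1]));
```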