Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2026-05-08 13:18:54 +03:00)

Compare commits: 75 commits
| SHA1 |
|---|
| f21a3adae2 |
| 07aa377af7 |
| 14258caec9 |
| cb46fcb948 |
| b89648a136 |
| c3bd1eb565 |
| 8c3c969938 |
| 38a6850b8d |
| d297e274a3 |
| a354e57659 |
| 5cc7360816 |
| 62748100f0 |
| fcbdebd6d7 |
| 454b8e2a35 |
| 7883da554e |
| fd2b6528a9 |
| cc57e60886 |
| e5681258f0 |
| 7cf0c5d67e |
| b04ed75f9f |
| 0ed8ab68f7 |
| dfebee57ec |
| bfe420a018 |
| e7e4b9a86d |
| bb549986e6 |
| 39954af96a |
| a6b43651ca |
| 3f28b583db |
| d4f67429d6 |
| fc43737868 |
| 43df0fb7f4 |
| d29cd29f55 |
| 2811df2953 |
| 8f0e99b875 |
| f07a91141a |
| 787822854c |
| f62a7a66c8 |
| 3a1378f469 |
| dde63e209e |
| 235cf88231 |
| c0a78dd55a |
| 711bb53d3d |
| 650defac75 |
| 2b3736802d |
| 9c7df6412c |
| 065c1f2cd5 |
| 1a1d7f578a |
| 2b16a05e54 |
| c6e9948984 |
| ecdb18fcde |
| df25d316d6 |
| 747286dccd |
| e60105411b |
| 937857a0bc |
| ba55191676 |
| c555f7d198 |
| 74819b95bd |
| da2af3d362 |
| 1583fe4af3 |
| 36f0620fd1 |
| 3cd2d4afe7 |
| d09c45bb63 |
| feecfb20da |
| 347279a12c |
| 7f65a254b3 |
| cc80f689ed |
| 4737192853 |
| 0c6817cb4e |
| 25a71d913f |
| b2cd556f3e |
| 4352fffeec |
| 8d08697cf8 |
| 9f1df42259 |
| 1e1f9957cd |
| bf37657c08 |
+16 -10

```diff
@@ -372,16 +372,22 @@
 ## Note that clients cache the /api/config endpoint for about 1 hour and it could take some time before they are enabled or disabled!
 ##
 ## The following flags are available:
-## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension.
-## - "inline-menu-totp": Enable the use of inline menu TOTP codes in the browser extension.
-## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0)
-## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
-## - "pm-25373-windows-biometrics-v2": Enable the new implementation of biometrics on Windows. (Needs desktop >= 2025.11.0)
-## - "export-attachments": Enable support for exporting attachments (Clients >=2025.4.0)
-## - "anon-addy-self-host-alias": Enable configuring self-hosted Anon Addy alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
-## - "simple-login-self-host-alias": Enable configuring self-hosted Simple Login alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
-## - "mutual-tls": Enable the use of mutual TLS on Android (Client >= 2025.2.0)
-# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
+## - "pm-5594-safari-account-switching": Enable account switching in Safari. (Safari >= 2026.2.0)
+## - "ssh-agent": Enable SSH agent support on Desktop. (Desktop >= 2024.12.0)
+## - "ssh-agent-v2": Enable newer SSH agent support. (Desktop >= 2026.2.1)
+## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Clients >= 2024.12.0)
+## - "pm-25373-windows-biometrics-v2": Enable the new implementation of biometrics on Windows. (Desktop >= 2025.11.0)
+## - "anon-addy-self-host-alias": Enable configuring self-hosted Anon Addy alias generator. (Android >= 2025.3.0, iOS >= 2025.4.0)
+## - "simple-login-self-host-alias": Enable configuring self-hosted Simple Login alias generator. (Android >= 2025.3.0, iOS >= 2025.4.0)
+## - "mutual-tls": Enable the use of mutual TLS on Android (Clients >= 2025.2.0)
+## - "cxp-import-mobile": Enable the import via CXP on iOS (Clients >= 2025.9.2)
+## - "cxp-export-mobile": Enable the export via CXP on iOS (Clients >= 2025.9.2)
+## - "pm-30529-webauthn-related-origins":
+## - "desktop-ui-migration-milestone-1": Special feature flag for desktop UI (Desktop >= 2026.2.0)
+## - "desktop-ui-migration-milestone-2": Special feature flag for desktop UI (Desktop >= 2026.2.0)
+## - "desktop-ui-migration-milestone-3": Special feature flag for desktop UI (Desktop >= 2026.2.0)
+## - "desktop-ui-migration-milestone-4": Special feature flag for desktop UI (Desktop >= 2026.2.0)
+# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=
 
 ## Require new device emails. When a user logs in an email is required to be sent.
 ## If sending the email fails the login attempt will fail!!
```
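To make the flag list concrete, here is a hedged sketch of how such flags would be combined in an environment file. The variable name and flag names come straight from the diff above; this particular selection is illustrative only, not a recommendation.

```sh
# Hypothetical .env excerpt: enable a few of the flags listed above.
# Clients cache /api/config for roughly one hour, so enabling or
# disabling a flag can take a while to show up in the apps.
EXPERIMENTAL_CLIENT_FEATURE_FLAGS=ssh-agent,ssh-key-vault-item,mutual-tls
```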
```diff
@@ -1,3 +1,2 @@
 # Ignore vendored scripts in GitHub stats
 src/static/scripts/* linguist-vendored
```
+22 -24

```diff
@@ -1,6 +1,10 @@
 name: Build
 permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 on:
   push:
     paths:
@@ -30,6 +34,10 @@ on:
       - "docker/DockerSettings.yaml"
       - "macros/**"
 
+defaults:
+  run:
+    shell: bash
+
 jobs:
   build:
     name: Build and Test ${{ matrix.channel }}
@@ -54,7 +62,7 @@ jobs:
 
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
         with:
           persist-credentials: false
           fetch-depth: 0
@@ -63,7 +71,6 @@ jobs:
       # Determine rust-toolchain version
       - name: Init Variables
         id: toolchain
-        shell: bash
         env:
           CHANNEL: ${{ matrix.channel }}
         run: |
@@ -78,32 +85,23 @@ jobs:
       # End Determine rust-toolchain version
 
-      # Only install the clippy and rustfmt components on the default rust-toolchain
-      - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master @ Dec 16, 2025, 6:11 PM GMT+1
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
-          components: clippy, rustfmt
-      # End Uses the rust-toolchain file to determine version
-
-      # Install the any other channel to be used for which we do not execute clippy and rustfmt
-      - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master @ Dec 16, 2025, 6:11 PM GMT+1
-        if: ${{ matrix.channel != 'rust-toolchain' }}
-        with:
-          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
-      # End Install the MSRV channel to be used
-
       # Set the current matrix toolchain version as default
-      - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+      - name: "Install toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
         env:
+          CHANNEL: ${{ matrix.channel }}
           RUST_TOOLCHAIN: ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
         run: |
           # Remove the rust-toolchain.toml
           rm rust-toolchain.toml
-          # Set the default
+
+          # Install the correct toolchain version
+          rustup toolchain install "${RUST_TOOLCHAIN}" --profile minimal --no-self-update
+
+          # If this matrix is the `rust-toolchain` flow, also install rustfmt and clippy
+          if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then
+            rustup component add --toolchain "${RUST_TOOLCHAIN}" rustfmt clippy
+          fi
+
+          # Set as the default toolchain
+          rustup default "${RUST_TOOLCHAIN}"
 
       # Show environment
@@ -115,7 +113,7 @@ jobs:
 
       # Enable Rust Caching
       - name: Rust Caching
-        uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+        uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1
         with:
           # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
           # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
```
```diff
@@ -1,8 +1,16 @@
 name: Check templates
 permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 on: [ push, pull_request ]
 
+defaults:
+  run:
+    shell: bash
+
 jobs:
   docker-templates:
     name: Validate docker templates
@@ -12,7 +20,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
         with:
           persist-credentials: false
       # End Checkout the repo
```
```diff
@@ -1,8 +1,15 @@
 name: Hadolint
 
-on: [ push, pull_request ]
 permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+on: [ push, pull_request ]
+
+defaults:
+  run:
+    shell: bash
+
 jobs:
   hadolint:
@@ -13,7 +20,7 @@ jobs:
     steps:
       # Start Docker Buildx
       - name: Setup Docker Buildx
-        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
         # https://github.com/moby/buildkit/issues/3969
         # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
         with:
@@ -25,7 +32,6 @@ jobs:
 
       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
-        shell: bash
         run: |
           sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
           sudo chmod +x /usr/local/bin/hadolint
@@ -34,20 +40,18 @@ jobs:
       # End Download hadolint
 
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
         with:
           persist-credentials: false
       # End Checkout the repo
 
       # Test Dockerfiles with hadolint
       - name: Run hadolint
-        shell: bash
         run: hadolint docker/Dockerfile.{debian,alpine}
       # End Test Dockerfiles with hadolint
 
       # Test Dockerfiles with docker build checks
       - name: Run docker build check
-        shell: bash
         run: |
           echo "Checking docker/Dockerfile.debian"
           docker build --check . -f docker/Dockerfile.debian
```
```diff
@@ -1,6 +1,12 @@
 name: Release
 permissions: {}
 
+concurrency:
+  # Apply concurrency control only on the upstream repo
+  group: ${{ github.repository == 'dani-garcia/vaultwarden' && format('{0}-{1}', github.workflow, github.ref) || github.run_id }}
+  # Don't cancel other runs when creating a tag
+  cancel-in-progress: ${{ github.ref_type == 'branch' }}
+
 on:
   push:
     branches:
@@ -10,33 +16,31 @@ on:
       # https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet
       - '[1-2].[0-9]+.[0-9]+'
 
-concurrency:
-  # Apply concurrency control only on the upstream repo
-  group: ${{ github.repository == 'dani-garcia/vaultwarden' && format('{0}-{1}', github.workflow, github.ref) || github.run_id }}
-  # Don't cancel other runs when creating a tag
-  cancel-in-progress: ${{ github.ref_type == 'branch' }}
-
 defaults:
   run:
     shell: bash
 
-env:
-  # The *_REPO variables need to be configured as repository variables
-  # Append `/settings/variables/actions` to your repo url
-  # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
-  # Check for Docker hub credentials in secrets
-  HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
-  # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
-  # Check for Github credentials in secrets
-  HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
-  # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
-  # Check for Quay.io credentials in secrets
-  HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
+# A "release" environment must be created in the repository settings
+# (Settings > Environments > New environment) with the following
+# variables and secrets configured as needed.
+#
+# Variables (only set the ones for registries you want to push to):
+#   DOCKERHUB_REPO: 'index.docker.io/<user>/<repo>'
+#   QUAY_REPO: 'quay.io/<user>/<repo>'
+#   GHCR_REPO: 'ghcr.io/<user>/<repo>'
+#
+# Secrets (only required when the corresponding *_REPO variable is set):
+#   DOCKERHUB_REPO => DOCKERHUB_USERNAME, DOCKERHUB_TOKEN
+#   QUAY_REPO => QUAY_USERNAME, QUAY_TOKEN
+#   GITHUB_TOKEN is provided automatically
 
 jobs:
   docker-build:
     name: Build Vaultwarden containers
+    if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
+    environment:
+      name: release
+      deployment: false
     permissions:
       packages: write # Needed to upload packages and artifacts
       contents: read
@@ -54,13 +58,13 @@ jobs:
 
     steps:
      - name: Initialize QEMU binfmt support
-        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
+        uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
        with:
          platforms: "arm64,arm"
 
      # Start Docker Buildx
      - name: Setup Docker Buildx
-        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
        # https://github.com/moby/buildkit/issues/3969
        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
        with:
@@ -73,7 +77,7 @@ jobs:
 
      # Checkout the repo
      - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        # We need fetch-depth of 0 so we also get all the tag metadata
        with:
          persist-credentials: false
@@ -102,14 +106,14 @@ jobs:
 
      # Login to Docker Hub
      - name: Login to Docker Hub
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
-        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        if: ${{ vars.DOCKERHUB_REPO != '' }}
 
      - name: Add registry for DockerHub
-        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        if: ${{ vars.DOCKERHUB_REPO != '' }}
        env:
          DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
        run: |
@@ -117,15 +121,15 @@ jobs:
 
      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        if: ${{ vars.GHCR_REPO != '' }}
 
      - name: Add registry for ghcr.io
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        if: ${{ vars.GHCR_REPO != '' }}
        env:
          GHCR_REPO: ${{ vars.GHCR_REPO }}
        run: |
@@ -133,15 +137,15 @@ jobs:
 
      # Login to Quay.io
      - name: Login to Quay.io
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_TOKEN }}
-        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        if: ${{ vars.QUAY_REPO != '' }}
 
      - name: Add registry for Quay.io
-        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        if: ${{ vars.QUAY_REPO != '' }}
        env:
          QUAY_REPO: ${{ vars.QUAY_REPO }}
        run: |
@@ -155,7 +159,7 @@ jobs:
        run: |
          #
          # Check if there is a GitHub Container Registry Login and use it for caching
-          if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
+          if [[ -n "${GHCR_REPO}" ]]; then
            echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH}" | tee -a "${GITHUB_ENV}"
            echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
          else
@@ -181,7 +185,7 @@ jobs:
 
      - name: Bake ${{ matrix.base_image }} containers
        id: bake_vw
-        uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0
+        uses: docker/bake-action@a66e1c87e2eca0503c343edf1d208c716d54b8a8 # v7.1.0
        env:
          BASE_TAGS: "${{ steps.determine-version.outputs.BASE_TAGS }}"
          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -218,7 +222,7 @@ jobs:
          touch "${RUNNER_TEMP}/digests/${digest#sha256:}"
 
      - name: Upload digest
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: digests-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }}
          path: ${{ runner.temp }}/digests/*
@@ -233,12 +237,12 @@ jobs:
 
      # Upload artifacts to Github Actions and Attest the binaries
      - name: Attest binaries
-        uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
+        uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
        with:
          subject-path: vaultwarden-${{ env.NORMALIZED_ARCH }}
 
      - name: Upload binaries as artifacts
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }}
          path: vaultwarden-${{ env.NORMALIZED_ARCH }}
@@ -247,6 +251,9 @@ jobs:
    name: Merge manifests
    runs-on: ubuntu-latest
    needs: docker-build
+    environment:
+      name: release
+      deployment: false
    permissions:
      packages: write # Needed to upload packages and artifacts
      attestations: write # Needed to generate an artifact attestation for a build
@@ -257,7 +264,7 @@ jobs:
 
    steps:
      - name: Download digests
-        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        with:
          path: ${{ runner.temp }}/digests
          pattern: digests-*-${{ matrix.base_image }}
@@ -265,14 +272,14 @@ jobs:
 
      # Login to Docker Hub
      - name: Login to Docker Hub
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
-        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        if: ${{ vars.DOCKERHUB_REPO != '' }}
 
      - name: Add registry for DockerHub
-        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        if: ${{ vars.DOCKERHUB_REPO != '' }}
        env:
          DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
        run: |
@@ -280,15 +287,15 @@ jobs:
 
      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        if: ${{ vars.GHCR_REPO != '' }}
 
      - name: Add registry for ghcr.io
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        if: ${{ vars.GHCR_REPO != '' }}
        env:
          GHCR_REPO: ${{ vars.GHCR_REPO }}
        run: |
@@ -296,15 +303,15 @@ jobs:
 
      # Login to Quay.io
      - name: Login to Quay.io
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_TOKEN }}
-        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        if: ${{ vars.QUAY_REPO != '' }}
 
      - name: Add registry for Quay.io
-        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        if: ${{ vars.QUAY_REPO != '' }}
        env:
          QUAY_REPO: ${{ vars.QUAY_REPO }}
        run: |
@@ -357,24 +364,24 @@ jobs:
 
      # Attest container images
      - name: Attest - docker.io - ${{ matrix.base_image }}
-        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && env.DIGEST_SHA != ''}}
-        uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
+        if: ${{ vars.DOCKERHUB_REPO != '' && env.DIGEST_SHA != ''}}
+        uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
        with:
          subject-name: ${{ vars.DOCKERHUB_REPO }}
          subject-digest: ${{ env.DIGEST_SHA }}
          push-to-registry: true
 
      - name: Attest - ghcr.io - ${{ matrix.base_image }}
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' && env.DIGEST_SHA != ''}}
-        uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
+        if: ${{ vars.GHCR_REPO != '' && env.DIGEST_SHA != ''}}
+        uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
        with:
          subject-name: ${{ vars.GHCR_REPO }}
          subject-digest: ${{ env.DIGEST_SHA }}
          push-to-registry: true
 
      - name: Attest - quay.io - ${{ matrix.base_image }}
-        if: ${{ env.HAVE_QUAY_LOGIN == 'true' && env.DIGEST_SHA != ''}}
-        uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
+        if: ${{ vars.QUAY_REPO != '' && env.DIGEST_SHA != ''}}
+        uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
        with:
          subject-name: ${{ vars.QUAY_REPO }}
          subject-digest: ${{ env.DIGEST_SHA }}
```
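The comment block above replaces the old HAVE_*_LOGIN environment checks: each push target is now gated directly on its *_REPO repository variable inside the "release" environment. A minimal sketch of configuring one target with the GitHub CLI, assuming `gh` is authenticated against your fork and the "release" environment already exists (the repo names are placeholders):

```sh
# Sketch: configure the GHCR push target for the release workflow.
# GHCR_REPO gates the login/add-registry/attest steps shown above;
# GITHUB_TOKEN is provided automatically, so no extra secret is needed.
gh variable set GHCR_REPO --env release --body "ghcr.io/<user>/vaultwarden"

# For Docker Hub the corresponding secrets must also be set:
gh variable set DOCKERHUB_REPO --env release --body "index.docker.io/<user>/vaultwarden"
gh secret set DOCKERHUB_USERNAME --env release --body "<user>"
gh secret set DOCKERHUB_TOKEN --env release --body "<access-token>"
```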
```diff
@@ -1,6 +1,10 @@
 name: Cleanup
 permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}
+  cancel-in-progress: false
+
 on:
   workflow_dispatch:
     inputs:
```
```diff
@@ -1,6 +1,10 @@
 name: Trivy
 permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 on:
   push:
     branches:
@@ -29,12 +33,12 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
         with:
           persist-credentials: false
 
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
+        uses: aquasecurity/trivy-action@ed142fd0673e97e23eac54620cfb913e5ce36c25 # v0.36.0
         env:
           TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
           TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1
@@ -46,6 +50,6 @@ jobs:
           severity: CRITICAL,HIGH
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
+        uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
         with:
           sarif_file: 'trivy-results.sarif'
```
```diff
@@ -1,7 +1,11 @@
 name: Code Spell Checking
+permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 on: [ push, pull_request ]
-permissions: {}
 
 jobs:
   typos:
@@ -12,11 +16,11 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
         with:
           persist-credentials: false
       # End Checkout the repo
 
       # When this version is updated, do not forget to update this in `.pre-commit-config.yaml` too
       - name: Spell Check Repo
-        uses: crate-ci/typos@1a319b54cc9e3b333fed6a5c88ba1a90324da514 # v1.40.1
+        uses: crate-ci/typos@7c572958218557a3272c2d6719629443b5cc26fd # v1.45.2
```
```diff
@@ -1,4 +1,9 @@
 name: Security Analysis with zizmor
+permissions: {}
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 on:
   push:
@@ -6,8 +11,6 @@ on:
   pull_request:
     branches: ["**"]
 
-permissions: {}
-
 jobs:
   zizmor:
     name: Run zizmor
@@ -16,12 +19,12 @@ jobs:
       security-events: write # To write the security report
     steps:
       - name: Checkout repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
         with:
           persist-credentials: false
 
       - name: Run zizmor
-        uses: zizmorcore/zizmor-action@e639db99335bc9038abc0e066dfcd72e23d26fb4 # v0.3.0
+        uses: zizmorcore/zizmor-action@b1d7e1fb5de872772f31590499237e7cce841e8e # v0.5.3
         with:
           # intentionally not scanning the entire repository,
           # since it contains integration tests.
```
+55 -53

```diff
@@ -1,58 +1,60 @@
 ---
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
   rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # v6.0.0
   hooks:
-    - id: check-yaml
-    - id: check-json
-    - id: check-toml
-    - id: mixed-line-ending
-      args: ["--fix=no"]
-    - id: end-of-file-fixer
-      exclude: "(.*js$|.*css$)"
-    - id: check-case-conflict
-    - id: check-merge-conflict
-    - id: detect-private-key
-    - id: check-symlinks
-    - id: forbid-submodules
-- repo: local
-  hooks:
-    - id: fmt
-      name: fmt
-      description: Format files with cargo fmt.
-      entry: cargo fmt
-      language: system
-      always_run: true
-      pass_filenames: false
-      args: ["--", "--check"]
-    - id: cargo-test
-      name: cargo test
-      description: Test the package for errors.
-      entry: cargo test
-      language: system
-      args: ["--features", "sqlite,mysql,postgresql", "--"]
-      types_or: [rust, file]
-      files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
-      pass_filenames: false
-    - id: cargo-clippy
-      name: cargo clippy
-      description: Lint Rust sources
-      entry: cargo clippy
-      language: system
-      args: ["--features", "sqlite,mysql,postgresql", "--", "-D", "warnings"]
-      types_or: [rust, file]
-      files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
-      pass_filenames: false
-    - id: check-docker-templates
-      name: check docker templates
-      description: Check if the Docker templates are updated
-      language: system
-      entry: sh
-      args:
-        - "-c"
-        - "cd docker && make"
-# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too
-- repo: https://github.com/crate-ci/typos
-  rev: 1a319b54cc9e3b333fed6a5c88ba1a90324da514 # v1.40.1
-  hooks:
-    - id: typos
+  - id: check-yaml
+  - id: check-json
+  - id: check-toml
+  - id: mixed-line-ending
+    args: [ "--fix=no" ]
+  - id: end-of-file-fixer
+    exclude: "(.*js$|.*css$)"
+  - id: check-case-conflict
+  - id: check-merge-conflict
+  - id: detect-private-key
+  - id: check-symlinks
+  - id: forbid-submodules
+
+# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too
+- repo: https://github.com/crate-ci/typos
+  rev: 7c572958218557a3272c2d6719629443b5cc26fd # v1.45.2
+  hooks:
+  - id: typos
+
+- repo: local
+  hooks:
+  - id: fmt
+    name: fmt
+    description: Format files with cargo fmt.
+    entry: cargo fmt
+    language: system
+    always_run: true
+    pass_filenames: false
+    args: [ "--", "--check" ]
+  - id: cargo-test
+    name: cargo test
+    description: Test the package for errors.
+    entry: cargo test
+    language: system
+    args: [ "--features", "sqlite,mysql,postgresql", "--" ]
+    types_or: [ rust, file ]
+    files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
+    pass_filenames: false
+  - id: cargo-clippy
+    name: cargo clippy
+    description: Lint Rust sources
+    entry: cargo clippy
+    language: system
+    args: [ "--features", "sqlite,mysql,postgresql", "--", "-D", "warnings" ]
+    types_or: [ rust, file ]
+    files: (Cargo.toml|Cargo.lock|rust-toolchain.toml|rustfmt.toml|.*\.rs$)
+    pass_filenames: false
+  - id: check-docker-templates
+    name: check docker templates
+    description: Check if the Docker templates are updated
+    language: system
+    entry: sh
+    args:
+      - "-c"
+      - "cd docker && make"
```
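With the hooks reordered and re-indented as above, they are exercised through the usual pre-commit workflow; the commands below are ordinary pre-commit usage, not something introduced by this change:

```sh
# Install the git hook once, then run every configured hook
# (typos, cargo fmt/test/clippy, docker template check) over the whole tree.
pre-commit install
pre-commit run --all-files
```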
```diff
@@ -23,4 +23,6 @@ extend-ignore-re = [
   # https://github.com/bitwarden/server/blob/dff9f1cf538198819911cf2c20f8cda3307701c5/src/Notifications/HubHelpers.cs#L86
   # https://github.com/bitwarden/clients/blob/9612a4ac45063e372a6fbe87eb253c7cb3c588fb/libs/common/src/auth/services/anonymous-hub.service.ts#L45
   "AuthRequestResponseRecieved",
+  # Ignore Punycode/IDN tests
+  "xn--.+"
 ]
```
Generated file: +1015 -812 (diff suppressed because it is too large)
+45 -41

```diff
@@ -1,6 +1,6 @@
 [workspace.package]
 edition = "2021"
-rust-version = "1.90.0"
+rust-version = "1.93.0"
 license = "AGPL-3.0-only"
 repository = "https://github.com/dani-garcia/vaultwarden"
 publish = false
@@ -23,15 +23,17 @@ publish.workspace = true
 
 [features]
 default = [
-    # "sqlite",
+    # "sqlite" or "sqlite_system",
     # "mysql",
     # "postgresql",
 ]
 # Empty to keep compatibility, prefer to set USE_SYSLOG=true
 enable_syslog = []
 # Please enable at least one of these DB backends.
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
 postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
-sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
+sqlite_system = ["diesel/sqlite", "diesel_migrations/sqlite"]
+sqlite = ["sqlite_system", "libsqlite3-sys/bundled"] # Alternative to the above, statically linked SQLite into the binary instead of dynamically.
 # Enable to use a vendored and statically linked openssl
 vendored_openssl = ["openssl/vendored"]
 # Enable MiMalloc memory allocator to replace the default malloc
@@ -78,46 +80,46 @@ rmpv = "1.3.1" # MessagePack library
 dashmap = "6.1.0"
 
 # Async futures
-futures = "0.3.31"
-tokio = { version = "1.48.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
-tokio-util = { version = "0.7.17", features = ["compat"]}
+futures = "0.3.32"
+tokio = { version = "1.52.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
+tokio-util = { version = "0.7.18", features = ["compat"]}
 
 # A generic serialization/deserialization framework
 serde = { version = "1.0.228", features = ["derive"] }
-serde_json = "1.0.148"
+serde_json = "1.0.149"
 
 # A safe, extensible ORM and Query builder
-# Currently pinned diesel to v2.3.3 as newer version break MySQL/MariaDB compatibility
-diesel = { version = "2.3.5", features = ["chrono", "r2d2", "numeric"] }
-diesel_migrations = "2.3.1"
+diesel = { version = "2.3.9", features = ["chrono", "r2d2", "numeric"] }
+diesel_migrations = "2.3.2"
 
 derive_more = { version = "2.1.1", features = ["from", "into", "as_ref", "deref", "display"] }
 diesel-derive-newtype = "2.1.2"
 
-# Bundled/Static SQLite
-libsqlite3-sys = { version = "0.35.0", features = ["bundled"], optional = true }
+# SQLite, statically bundled unless the `sqlite_system` feature is enabled
+libsqlite3-sys = { version = "0.37.0", optional = true }
 
 # Crypto-related libraries
-rand = "0.9.2"
+rand = "0.10.1"
 ring = "0.17.14"
 subtle = "2.6.1"
 
 # UUID generation
-uuid = { version = "1.19.0", features = ["v4"] }
+uuid = { version = "1.23.1", features = ["v4"] }
 
 # Date and time libraries
-chrono = { version = "0.4.42", features = ["clock", "serde"], default-features = false }
+chrono = { version = "0.4.44", features = ["clock", "serde"], default-features = false }
 chrono-tz = "0.10.4"
-time = "0.3.44"
+time = "0.3.47"
 
 # Job scheduler
 job_scheduler_ng = "2.4.0"
 
 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.9.0"
+data-encoding = "2.11.0"
 
 # JWT library
-jsonwebtoken = { version = "10.2.0", features = ["use_pem", "rust_crypto"], default-features = false }
+jsonwebtoken = { version = "10.3.0", features = ["use_pem", "rust_crypto"], default-features = false }
 
 # TOTP library
 totp-lite = "2.0.1"
@@ -128,67 +130,67 @@ yubico = { package = "yubico_ng", version = "0.14.1", features = ["online-tokio"
 # WebAuthn libraries
 # danger-allow-state-serialisation is needed to save the state in the db
 # danger-credential-internals is needed to support U2F to Webauthn migration
-webauthn-rs = { version = "0.5.4", features = ["danger-allow-state-serialisation", "danger-credential-internals"] }
-webauthn-rs-proto = "0.5.4"
-webauthn-rs-core = "0.5.4"
+webauthn-rs = { version = "0.5.5", features = ["danger-allow-state-serialisation", "danger-credential-internals"] }
+webauthn-rs-proto = "0.5.5"
+webauthn-rs-core = "0.5.5"
 
 # Handling of URL's for WebAuthn and favicons
-url = "2.5.7"
+url = "2.5.8"
 
 # Email libraries
-lettre = { version = "0.11.19", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "hostname", "tracing", "tokio1-rustls", "ring", "rustls-native-certs"], default-features = false }
+lettre = { version = "0.11.21", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "hostname", "tracing", "tokio1-rustls", "ring", "rustls-native-certs"], default-features = false }
 percent-encoding = "2.3.2" # URL encoding library used for URL's in the emails
 email_address = "0.2.9"
 
 # HTML Template library
-handlebars = { version = "6.3.2", features = ["dir_source"] }
+handlebars = { version = "6.4.0", features = ["dir_source"] }
 
 # HTTP client (Used for favicons, version check, DUO and HIBP API)
 reqwest = { version = "0.12.28", features = ["rustls-tls", "rustls-tls-native-roots", "stream", "json", "deflate", "gzip", "brotli", "zstd", "socks", "cookies", "charset", "http2", "system-proxy"], default-features = false}
-hickory-resolver = "0.25.2"
+hickory-resolver = "0.26.1"
 
 # Favicon extraction libraries
 html5gum = "0.8.3"
-regex = { version = "1.12.2", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.12.3", features = ["std", "perf", "unicode-perl"], default-features = false }
 data-url = "0.3.2"
-bytes = "1.11.0"
-svg-hush = "0.9.5"
+bytes = "1.11.1"
+svg-hush = "0.9.6"
 
 # Cache function results (Used for version check and favicon fetching)
-cached = { version = "0.56.0", features = ["async"] }
+cached = { version = "0.59.0", features = ["async"] }
 
 # Used for custom short lived cookie jar during favicon extraction
 cookie = "0.18.1"
-cookie_store = "0.22.0"
+cookie_store = "0.22.1"
 
 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.75"
+openssl = "0.10.78"
 
 # CLI argument parsing
 pico-args = "0.5.0"
 
 # Macro ident concatenation
-pastey = "0.2.1"
+pastey = "0.2.2"
 governor = "0.10.4"
 
 # OIDC for SSO
-openidconnect = { version = "4.0.1", features = ["reqwest", "native-tls"] }
-mini-moka = "0.10.3"
+openidconnect = { version = "4.0.1", features = ["reqwest", "rustls-tls"] }
+moka = { version = "0.12.15", features = ["future"] }
 
 # Check client versions for specific features.
-semver = "1.0.27"
+semver = "1.0.28"
 
 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.48", features = ["secure"], default-features = false, optional = true }
+mimalloc = { version = "0.1.50", features = ["secure"], default-features = false, optional = true }
 
-which = "8.0.0"
+which = "8.0.2"
 
 # Argon2 library with support for the PHC format
 argon2 = "0.5.3"
 
 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.4.0"
+rpassword = "7.5.1"
 
 # Loading a dynamic CSS Stylesheet
 grass_compiler = { version = "0.13.4", default-features = false }
@@ -197,10 +199,10 @@ grass_compiler = { version = "0.13.4", default-features = false }
 opendal = { version = "0.55.0", features = ["services-fs"], default-features = false }
 
 # For retrieving AWS credentials, including temporary SSO credentials
-anyhow = { version = "1.0.100", optional = true }
-aws-config = { version = "1.8.12", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
-aws-credential-types = { version = "1.2.11", optional = true }
-aws-smithy-runtime-api = { version = "1.9.3", optional = true }
+anyhow = { version = "1.0.102", optional = true }
+aws-config = { version = "1.8.16", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
+aws-credential-types = { version = "1.2.14", optional = true }
+aws-smithy-runtime-api = { version = "1.12.0", optional = true }
 http = { version = "1.4.0", optional = true }
 reqsign = { version = "0.16.5", optional = true }
 
@@ -301,6 +303,7 @@ branches_sharing_code = "deny"
 case_sensitive_file_extension_comparisons = "deny"
 cast_lossless = "deny"
 clone_on_ref_ptr = "deny"
+duration_suboptimal_units = "deny"
 equatable_if_let = "deny"
 excessive_precision = "deny"
 filter_map_next = "deny"
@@ -322,6 +325,7 @@ needless_continue = "deny"
 needless_lifetimes = "deny"
 option_option = "deny"
 redundant_clone = "deny"
+ref_option = "deny"
 string_add_assign = "deny"
 unnecessary_join = "deny"
 unnecessary_self_imports = "deny"
```
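The feature split means `sqlite` now implies `sqlite_system` and additionally turns on the bundled `libsqlite3-sys` build, while `sqlite_system` alone links against the system library. The build commands below illustrate the choice; these are standard cargo invocations and the feature names come from the diff:

```sh
# Statically bundled SQLite (the behaviour the plain `sqlite` feature keeps):
cargo build --release --features sqlite

# Dynamically link against the system-provided SQLite library instead:
cargo build --release --features sqlite_system
```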
```diff
@@ -59,8 +59,9 @@ A nearly complete implementation of the Bitwarden Client API is provided, includ
 ## Usage
 
 > [!IMPORTANT]
-> The web-vault requires the use a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API).
-> That means it will only work via `http://localhost:8000` (using the port from the example below) or if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS).
+> The web-vault requires the use of HTTPS and a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API). <br>
+> That means it will only work if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS). <br>
+> We also suggest to use a [reverse proxy](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples).
 
 The recommended way to install and use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).
 See [which container image to use](https://github.com/dani-garcia/vaultwarden/wiki/Which-container-image-to-use) for an explanation of the provided tags.
```
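For orientation, a hedged sketch of the container-based setup this README text recommends; the image tag, volume path, and port mapping are illustrative placeholders, and per the admonition above the web-vault still needs HTTPS (typically terminated by a reverse proxy) in front of it:

```sh
# Sketch: pull and run the published container image.
docker pull ghcr.io/dani-garcia/vaultwarden:latest
docker run -d --name vaultwarden \
  -v /vw-data/:/data/ \
  -p 80:80 \
  ghcr.io/dani-garcia/vaultwarden:latest
```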
```diff
@@ -2,21 +2,21 @@ use std::env;
 use std::process::Command;
 
 fn main() {
-    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
-    #[cfg(feature = "sqlite")]
+    // These allow using e.g. #[cfg(mysql)] instead of #[cfg(feature = "mysql")], which helps when trying to add them through macros
+    #[cfg(feature = "sqlite_system")] // The `sqlite` feature implies this one.
     println!("cargo:rustc-cfg=sqlite");
     #[cfg(feature = "mysql")]
     println!("cargo:rustc-cfg=mysql");
     #[cfg(feature = "postgresql")]
     println!("cargo:rustc-cfg=postgresql");
-    #[cfg(feature = "s3")]
-    println!("cargo:rustc-cfg=s3");
 
-    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
+    #[cfg(not(any(feature = "sqlite_system", feature = "mysql", feature = "postgresql")))]
     compile_error!(
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );
 
+    #[cfg(feature = "s3")]
+    println!("cargo:rustc-cfg=s3");
+
     // Use check-cfg to let cargo know which cfg's we define,
     // and avoid warnings when they are used in the code.
     println!("cargo::rustc-check-cfg=cfg(sqlite)");
```
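These `cargo:rustc-cfg` lines are what let the rest of the crate write the short `#[cfg(...)]` forms the comment mentions. A minimal sketch of what that looks like downstream; the function and its body are hypothetical, only the cfg names come from the build script above:

```rust
// Because build.rs emits `cargo:rustc-cfg=sqlite` whenever the
// `sqlite_system` feature (implied by `sqlite`) is enabled, code can
// test the short alias instead of spelling out the feature gate.
#[cfg(sqlite)]
fn backend_name() -> &'static str {
    "sqlite" // hypothetical helper, for illustration only
}

#[cfg(not(sqlite))]
fn backend_name() -> &'static str {
    "mysql or postgresql"
}
```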
+1 -1

```diff
@@ -2,4 +2,4 @@
 # see diesel.rs/guides/configuring-diesel-cli
 
 [print_schema]
-file = "src/db/schema.rs"
+file = "src/db/schema.rs"
```
```diff
@@ -1,11 +1,11 @@
 ---
-vault_version: "v2025.12.1"
-vault_image_digest: "sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad"
+vault_version: "v2026.4.1"
+vault_image_digest: "sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe"
 # Cross Compile Docker Helper Scripts v1.9.0
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
 # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
 xx_image_digest: "sha256:c64defb9ed5a91eacb37f96ccc3d4cd72521c4bd18d5442905b95e2226b0e707"
-rust_version: 1.92.0 # Rust version to be used
+rust_version: 1.95.0 # Rust version to be used
 debian_version: trixie # Debian release name to be used
 alpine_version: "3.23" # Alpine version to be used
 # For which platforms/architectures will we try to build images
```
+10 -10

```diff
@@ -19,23 +19,23 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2025.12.1
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.12.1
-#     [docker.io/vaultwarden/web-vault@sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2026.4.1
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.4.1
+#     [docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad
-#     [docker.io/vaultwarden/web-vault:v2025.12.1]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe
+#     [docker.io/vaultwarden/web-vault:v2026.4.1]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad AS vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe AS vault
 
 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 and linux/arm64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.92.0 AS build_amd64
-FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.92.0 AS build_arm64
-FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.92.0 AS build_armv7
-FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.92.0 AS build_armv6
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.95.0 AS build_amd64
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.95.0 AS build_arm64
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.95.0 AS build_armv7
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.95.0 AS build_armv6
 
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
```
```diff
@@ -19,15 +19,15 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2025.12.1
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.12.1
-#     [docker.io/vaultwarden/web-vault@sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2026.4.1
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.4.1
+#     [docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad
-#     [docker.io/vaultwarden/web-vault:v2025.12.1]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe
+#     [docker.io/vaultwarden/web-vault:v2026.4.1]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:dc718ffec13eccab8a849d65dd436b38730577b9b46be4672d97debc88e2c0ad AS vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe AS vault
 
 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
@@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c64defb9ed5a91eacb37f
 
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.92.0-slim-trixie AS build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.95.0-slim-trixie AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT
```
```diff
@@ -19,13 +19,13 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version }}
-#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version }}
+#     $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version | replace('+', '_') }}
+#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version | replace('+', '_') }}
 #     [docker.io/vaultwarden/web-vault@{{ vault_image_digest }}]
 #
 # - Conversely, to get the tag name from the digest:
 #     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
-#     [docker.io/vaultwarden/web-vault:{{ vault_version }}]
+#     [docker.io/vaultwarden/web-vault:{{ vault_version | replace('+', '_') }}]
 #
 FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault
```
+2 -2

```diff
@@ -13,8 +13,8 @@ path = "src/lib.rs"
 proc-macro = true
 
 [dependencies]
-quote = "1.0.42"
-syn = "2.0.111"
+quote = "1.0.45"
+syn = "2.0.117"
 
 [lints]
 workspace = true
```
```diff
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS archives;
@@ -0,0 +1,10 @@
+DROP TABLE IF EXISTS archives;
+
+CREATE TABLE archives (
+    user_uuid CHAR(36) NOT NULL,
+    cipher_uuid CHAR(36) NOT NULL,
+    archived_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (user_uuid, cipher_uuid),
+    FOREIGN KEY (user_uuid) REFERENCES users (uuid) ON DELETE CASCADE,
+    FOREIGN KEY (cipher_uuid) REFERENCES ciphers (uuid) ON DELETE CASCADE
+);
@@ -0,0 +1 @@
+ALTER TABLE sso_auth DROP COLUMN binding_hash;
@@ -0,0 +1 @@
+ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS archives;
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS archives;
+
+CREATE TABLE archives (
+    user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
+    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
+    archived_at TIMESTAMP NOT NULL DEFAULT now(),
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
@@ -0,0 +1 @@
+ALTER TABLE sso_auth DROP COLUMN binding_hash;
@@ -0,0 +1 @@
+ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS archives;
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS archives;
+
+CREATE TABLE archives (
+    user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
+    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
+    archived_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
@@ -0,0 +1 @@
+ALTER TABLE sso_auth DROP COLUMN binding_hash;
@@ -0,0 +1 @@
+ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;
```
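The three `archives` variants (the dialects suggest MySQL, PostgreSQL, and SQLite, in that order) all end up with the same shape: a composite-keyed join table from users to ciphers with an archival timestamp. A hedged example of how a row would be written and read; the table and column names come from the migrations, while the UUID values are placeholders:

```sql
-- Archive one cipher for one user; archived_at falls back to its DEFAULT.
INSERT INTO archives (user_uuid, cipher_uuid)
VALUES ('00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002');

-- List everything a user has archived, newest first.
SELECT cipher_uuid, archived_at
FROM archives
WHERE user_uuid = '00000000-0000-0000-0000-000000000001'
ORDER BY archived_at DESC;
```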
+1 -1

```diff
@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.92.0"
+channel = "1.95.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"
```
+82
-27
@@ -30,9 +30,10 @@ use crate::{
|
||||
error::{Error, MapResult},
|
||||
http_client::make_http_request,
|
||||
mail,
|
||||
sso::FAKE_SSO_IDENTIFIER,
|
||||
util::{
|
||||
container_base_image, format_naive_datetime_local, get_display_size, get_web_vault_version,
|
||||
is_running_in_container, NumberOrString,
|
||||
container_base_image, format_naive_datetime_local, get_active_web_release, get_display_size,
|
||||
is_running_in_container, parse_experimental_client_feature_flags, FeatureFlagFilter, NumberOrString,
|
||||
},
|
||||
CONFIG, VERSION,
|
||||
};
@@ -315,7 +316,11 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -

async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult {
    if CONFIG.mail_enabled() {
        let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
        let org_id: OrganizationId = if CONFIG.sso_enabled() {
            FAKE_SSO_IDENTIFIER.into()
        } else {
            FAKE_ADMIN_UUID.into()
        };
        let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
        mail::send_invite(user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
    } else {
@@ -464,7 +469,7 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti

    if CONFIG.push_enabled() {
        for device in Device::find_push_devices_by_user(&user.uuid, &conn).await {
            match unregister_push_device(&device.push_uuid).await {
            match unregister_push_device(device.push_uuid.as_ref()).await {
                Ok(r) => r,
                Err(e) => error!("Unable to unregister devices from Bitwarden server: {e}"),
            };
@@ -472,7 +477,7 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti
    }

    Device::delete_all_by_user(&user.uuid, &conn).await?;
    user.reset_security_stamp();
    user.reset_security_stamp(&conn).await?;

    user.save(&conn).await
}
@@ -480,14 +485,15 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti
#[post("/users/<user_id>/disable", format = "application/json")]
async fn disable_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let mut user = get_user_or_404(&user_id, &conn).await?;
    Device::delete_all_by_user(&user.uuid, &conn).await?;
    user.reset_security_stamp();
    user.reset_security_stamp(&conn).await?;
    user.enabled = false;

    let save_result = user.save(&conn).await;

    nt.send_logout(&user, None, &conn).await;

    Device::delete_all_by_user(&user.uuid, &conn).await?;

    save_result
}

@@ -517,7 +523,11 @@ async fn resend_user_invite(user_id: UserId, _token: AdminToken, conn: DbConn) -
    }

    if CONFIG.mail_enabled() {
        let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
        let org_id: OrganizationId = if CONFIG.sso_enabled() {
            FAKE_SSO_IDENTIFIER.into()
        } else {
            FAKE_ADMIN_UUID.into()
        };
        let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
        mail::send_invite(&user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
    } else {
@@ -637,7 +647,6 @@ use cached::proc_macro::cached;
/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already
/// It will cache this function for 600 seconds (10 minutes) which should prevent the exhaustion of the rate limit
/// Any cache will be lost if Vaultwarden is restarted
use std::time::Duration; // Needed for cached
#[cached(time = 600, sync_writes = "default")]
async fn get_release_info(has_http_access: bool) -> (String, String, String) {
    // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
@@ -689,6 +698,26 @@ async fn get_ntp_time(has_http_access: bool) -> String {
    String::from("Unable to fetch NTP time.")
}

fn web_vault_compare(active: &str, latest: &str) -> i8 {
    use semver::Version;
    use std::cmp::Ordering;

    let active_semver = Version::parse(active).unwrap_or_else(|e| {
        warn!("Unable to parse active web-vault version '{active}': {e}");
        Version::parse("2025.1.1").unwrap()
    });
    let latest_semver = Version::parse(latest).unwrap_or_else(|e| {
        warn!("Unable to parse latest web-vault version '{latest}': {e}");
        Version::parse("2025.1.1").unwrap()
    });

    match active_semver.cmp(&latest_semver) {
        Ordering::Less => -1,
        Ordering::Equal => 0,
        Ordering::Greater => 1,
    }
}

#[get("/diagnostics")]
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
    use chrono::prelude::*;
@@ -708,32 +737,28 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A
        _ => "Unable to resolve domain name.".to_string(),
    };

    let (latest_release, latest_commit, latest_web_build) = get_release_info(has_http_access).await;
    let (latest_vw_release, latest_vw_commit, latest_web_release) = get_release_info(has_http_access).await;
    let active_web_release = get_active_web_release();
    let web_vault_compare = web_vault_compare(&active_web_release, &latest_web_release);

    let ip_header_name = &ip_header.0.unwrap_or_default();

    // Get current running versions
    let web_vault_version = get_web_vault_version();

    // Check if the running version is newer than the latest stable released version
    let web_vault_pre_release = if let Ok(web_ver_match) = semver::VersionReq::parse(&format!(">{latest_web_build}")) {
        web_ver_match.matches(
            &semver::Version::parse(&web_vault_version).unwrap_or_else(|_| semver::Version::parse("2025.1.1").unwrap()),
        )
    } else {
        error!("Unable to parse latest_web_build: '{latest_web_build}'");
        false
    };
    let invalid_feature_flags: Vec<String> = parse_experimental_client_feature_flags(
        &CONFIG.experimental_client_feature_flags(),
        FeatureFlagFilter::InvalidOnly,
    )
    .into_keys()
    .collect();

    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "current_release": VERSION,
        "latest_release": latest_release,
        "latest_commit": latest_commit,
        "latest_release": latest_vw_release,
        "latest_commit": latest_vw_commit,
        "web_vault_enabled": &CONFIG.web_vault_enabled(),
        "web_vault_version": web_vault_version,
        "latest_web_build": latest_web_build,
        "web_vault_pre_release": web_vault_pre_release,
        "active_web_release": active_web_release,
        "latest_web_release": latest_web_release,
        "web_vault_compare": web_vault_compare,
        "running_within_container": running_within_container,
        "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
        "has_http_access": has_http_access,
@@ -747,6 +772,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A
        "db_version": get_sql_server_version(&conn).await,
        "admin_url": format!("{}/diagnostics", admin_url()),
        "overrides": &CONFIG.get_overrides().join(", "),
        "invalid_feature_flags": invalid_feature_flags,
        "host_arch": env::consts::ARCH,
        "host_os": env::consts::OS,
        "tz_env": env::var("TZ").unwrap_or_default(),
@@ -844,3 +870,32 @@ impl<'r> FromRequest<'r> for AdminToken {
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn validate_web_vault_compare() {
        // web_vault_compare(active, latest)
        // Test normal versions
        assert!(web_vault_compare("2025.12.0", "2025.12.1") == -1);
        assert!(web_vault_compare("2025.12.1", "2025.12.1") == 0);
        assert!(web_vault_compare("2025.12.2", "2025.12.1") == 1);

        // Test patched/+build.n versions
        // Newer latest version
        assert!(web_vault_compare("2025.12.0+build.1", "2025.12.1") == -1);
        assert!(web_vault_compare("2025.12.1", "2025.12.1+build.1") == -1);
        assert!(web_vault_compare("2025.12.0+build.1", "2025.12.1+build.1") == -1);
        assert!(web_vault_compare("2025.12.1+build.1", "2025.12.1+build.2") == -1);
        // Equal versions
        assert!(web_vault_compare("2025.12.1+build.1", "2025.12.1+build.1") == 0);
        assert!(web_vault_compare("2025.12.2+build.2", "2025.12.2+build.2") == 0);
        // Newer active version
        assert!(web_vault_compare("2025.12.1+build.1", "2025.12.1") == 1);
        assert!(web_vault_compare("2025.12.2", "2025.12.1+build.1") == 1);
        assert!(web_vault_compare("2025.12.2+build.1", "2025.12.1+build.1") == 1);
        assert!(web_vault_compare("2025.12.1+build.3", "2025.12.1+build.2") == 1);
    }
}
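// Note: the build-metadata cases above pass because the `semver` crate's
// `Ord` for `Version` does compare build metadata, even though strict SemVer
// precedence ignores it; a quick standalone check of that assumption:
use semver::Version;

fn main() {
    assert!(Version::parse("2025.12.1+build.2").unwrap() > Version::parse("2025.12.1+build.1").unwrap());
    // without build metadata on one side, the bare version sorts lower
    assert!(Version::parse("2025.12.1").unwrap() < Version::parse("2025.12.1+build.1").unwrap());
}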

+48
-44
@@ -22,7 +22,7 @@ use crate::{
        DbConn,
    },
    mail,
    util::{format_date, NumberOrString},
    util::{deser_opt_nonempty_str, format_date, NumberOrString},
    CONFIG,
};

@@ -33,7 +33,6 @@ use rocket::{

pub fn routes() -> Vec<rocket::Route> {
    routes![
        register,
        profile,
        put_profile,
        post_profile,
@@ -107,7 +106,6 @@ pub struct RegisterData {

    name: Option<String>,

    #[allow(dead_code)]
    organization_user_id: Option<MembershipId>,

    // Used only from the register/finish endpoint
@@ -139,7 +137,7 @@ struct KeysData {
}

/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
fn clean_password_hint(password_hint: Option<&String>) -> Option<String> {
    match password_hint {
        None => None,
        Some(h) => match h.trim() {
@@ -149,7 +147,7 @@ fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
    }
}

fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
fn enforce_password_hint_setting(password_hint: Option<&String>) -> EmptyResult {
    if password_hint.is_some() && !CONFIG.password_hints_allowed() {
        err!("Password hints have been disabled by the administrator. Remove the hint and try again.");
    }
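// The `Option<&String>` form above lets callers write
// `clean_password_hint(data.master_password_hint.as_ref())`; a self-contained
// sketch of that calling pattern (the trimming body is elided by the hunk, so
// the `"" => None` arm below is an assumption about the hidden lines):
fn clean_password_hint(password_hint: Option<&String>) -> Option<String> {
    match password_hint {
        None => None,
        Some(h) => match h.trim() {
            "" => None,
            trimmed => Some(trimmed.to_string()),
        },
    }
}

fn main() {
    let hint = Some("  hunter2  ".to_string());
    assert_eq!(clean_password_hint(hint.as_ref()).as_deref(), Some("hunter2"));
    assert_eq!(clean_password_hint(Some(&"   ".to_string())), None);
}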
@@ -168,11 +166,6 @@ async fn is_email_2fa_required(member_id: Option<MembershipId>, conn: &DbConn) -
    false
}

#[post("/accounts/register", data = "<data>")]
async fn register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
    _register(data, false, conn).await
}

pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn: DbConn) -> JsonResult {
    let mut data: RegisterData = data.into_inner();
    let email = data.email.to_lowercase();
@@ -252,8 +245,8 @@ pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn:

    // Check against the password hint setting here so if it fails, the user
    // can retry without losing their invitation below.
    let password_hint = clean_password_hint(&data.master_password_hint);
    enforce_password_hint_setting(&password_hint)?;
    let password_hint = clean_password_hint(data.master_password_hint.as_ref());
    enforce_password_hint_setting(password_hint.as_ref())?;

    let mut user = match User::find_by_mail(&email, &conn).await {
        Some(user) => {
@@ -302,7 +295,7 @@ pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn:

    set_kdf_data(&mut user, &data.kdf)?;

    user.set_password(&data.master_password_hash, Some(data.key), true, None);
    user.set_password(&data.master_password_hash, Some(data.key), true, None, &conn).await?;
    user.password_hint = password_hint;

    // Add extra fields if present
@@ -360,8 +353,8 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:

    // Check against the password hint setting here so if it fails,
    // the user can retry without losing their invitation below.
    let password_hint = clean_password_hint(&data.master_password_hint);
    enforce_password_hint_setting(&password_hint)?;
    let password_hint = clean_password_hint(data.master_password_hint.as_ref());
    enforce_password_hint_setting(password_hint.as_ref())?;

    set_kdf_data(&mut user, &data.kdf)?;

@@ -370,7 +363,9 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
        Some(data.key),
        false,
        Some(vec![String::from("revision_date")]), // We need to allow revision-date to use the old security_timestamp
    );
        &conn,
    )
    .await?;
    user.password_hint = password_hint;

    if let Some(keys) = data.keys {
@@ -379,15 +374,13 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
    }

    if let Some(identifier) = data.org_identifier {
        if identifier != crate::sso::FAKE_IDENTIFIER && identifier != crate::api::admin::FAKE_ADMIN_UUID {
            let org = match Organization::find_by_uuid(&identifier.into(), &conn).await {
                None => err!("Failed to retrieve the associated organization"),
                Some(org) => org,
        if identifier != crate::sso::FAKE_SSO_IDENTIFIER && identifier != crate::api::admin::FAKE_ADMIN_UUID {
            let Some(org) = Organization::find_by_uuid(&identifier.into(), &conn).await else {
                err!("Failed to retrieve the associated organization")
            };

            let membership = match Membership::find_by_user_and_org(&user.uuid, &org.uuid, &conn).await {
                None => err!("Failed to retrieve the invitation"),
                Some(org) => org,
            let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org.uuid, &conn).await else {
                err!("Failed to retrieve the invitation")
            };

            accept_org_invite(&user, membership, None, &conn).await?;
@@ -522,8 +515,8 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, conn: DbCon
        err!("Invalid password")
    }

    user.password_hint = clean_password_hint(&data.master_password_hint);
    enforce_password_hint_setting(&user.password_hint)?;
    user.password_hint = clean_password_hint(data.master_password_hint.as_ref());
    enforce_password_hint_setting(user.password_hint.as_ref())?;

    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn)
        .await;
@@ -538,14 +531,16 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, conn: DbCon
            String::from("get_public_keys"),
            String::from("get_api_webauthn"),
        ]),
    );
        &conn,
    )
    .await?;

    let save_result = user.save(&conn).await;

    // Prevent logging out the client where the user requested this endpoint from.
    // If you log out the user it will cause issues on the client side.
    // Adding the device uuid will prevent this.
    nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await;
    nt.send_logout(&user, Some(&headers.device), &conn).await;

    save_result
}
@@ -585,7 +580,6 @@ fn set_kdf_data(user: &mut User, data: &KDFData) -> EmptyResult {
    Ok(())
}

#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct AuthenticationData {
@@ -594,7 +588,6 @@ struct AuthenticationData {
    master_password_authentication_hash: String,
}

#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UnlockData {
@@ -603,11 +596,12 @@ struct UnlockData {
    master_key_wrapped_user_key: String,
}

#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ChangeKdfData {
    #[allow(dead_code)]
    new_master_password_hash: String,
    #[allow(dead_code)]
    key: String,
    authentication_data: AuthenticationData,
    unlock_data: UnlockData,
@@ -639,10 +633,12 @@ async fn post_kdf(data: Json<ChangeKdfData>, headers: Headers, conn: DbConn, nt:
        Some(data.unlock_data.master_key_wrapped_user_key),
        true,
        None,
    );
        &conn,
    )
    .await?;
    let save_result = user.save(&conn).await;

    nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await;
    nt.send_logout(&user, Some(&headers.device), &conn).await;

    save_result
}
@@ -653,6 +649,7 @@ struct UpdateFolderData {
    // There is a bug in 2024.3.x which adds a `null` item.
    // To bypass this we allow an Option here, but skip it during the updates
    // See: https://github.com/bitwarden/clients/issues/8453
    #[serde(default, deserialize_with = "deser_opt_nonempty_str")]
    id: Option<FolderId>,
    name: String,
}
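// `deser_opt_nonempty_str` itself is defined in src/util.rs and not shown in
// this diff; a plausible standalone sketch of such a deserializer (the exact
// body is an assumption), mapping missing, null, and "" all to `None`:
use serde::{Deserialize, Deserializer};

fn deser_opt_nonempty_str<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
where
    D: Deserializer<'de>,
    T: From<String>,
{
    // absent fields are handled by `#[serde(default)]`; null and "" are filtered out here
    let opt = Option::<String>::deserialize(deserializer)?;
    Ok(opt.filter(|s| !s.is_empty()).map(T::from))
}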
@@ -906,14 +903,16 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, conn: DbConn, nt:
        Some(data.account_unlock_data.master_password_unlock_data.master_key_encrypted_user_key),
        true,
        None,
    );
        &conn,
    )
    .await?;

    let save_result = user.save(&conn).await;

    // Prevent logging out the client where the user requested this endpoint from.
    // If you log out the user it will cause issues on the client side.
    // Adding the device uuid will prevent this.
    nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await;
    nt.send_logout(&user, Some(&headers.device), &conn).await;

    save_result
}
@@ -925,12 +924,13 @@ async fn post_sstamp(data: Json<PasswordOrOtpData>, headers: Headers, conn: DbCo

    data.validate(&user, true, &conn).await?;

    Device::delete_all_by_user(&user.uuid, &conn).await?;
    user.reset_security_stamp();
    user.reset_security_stamp(&conn).await?;
    let save_result = user.save(&conn).await;

    nt.send_logout(&user, None, &conn).await;

    Device::delete_all_by_user(&user.uuid, &conn).await?;

    save_result
}

@@ -1048,7 +1048,7 @@ async fn post_email(data: Json<ChangeEmailData>, headers: Headers, conn: DbConn,
    user.email_new = None;
    user.email_new_token = None;

    user.set_password(&data.new_master_password_hash, Some(data.key), true, None);
    user.set_password(&data.new_master_password_hash, Some(data.key), true, None, &conn).await?;

    let save_result = user.save(&conn).await;

@@ -1199,10 +1199,9 @@ async fn password_hint(data: Json<PasswordHintData>, conn: DbConn) -> EmptyResul
        // There is still a timing side channel here in that the code
        // paths that send mail take noticeably longer than ones that
        // don't. Add a randomized sleep to mitigate this somewhat.
        use rand::{rngs::SmallRng, Rng, SeedableRng};
        let mut rng = SmallRng::from_os_rng();
        let delta: i32 = 100;
        let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
        use rand::{rngs::SmallRng, RngExt};
        let mut rng: SmallRng = rand::make_rng();
        let sleep_ms = rng.random_range(900..=1100) as u64;
        tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
        Ok(())
    } else {
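// For reference, an equivalent jittered delay written against the stable
// rand 0.8 API (the patch itself targets a newer rand release exposing
// `make_rng`/`RngExt`); uniform in [900, 1100] ms so the mail and no-mail
// paths take indistinguishable time (needs rand's `small_rng` feature):
use rand::{rngs::SmallRng, Rng, SeedableRng};

async fn jittered_delay() {
    let mut rng = SmallRng::from_entropy();
    let sleep_ms: u64 = rng.gen_range(900..=1100);
    tokio::time::sleep(std::time::Duration::from_millis(sleep_ms)).await;
}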
@@ -1261,7 +1260,7 @@ struct SecretVerificationRequest {
pub async fn kdf_upgrade(user: &mut User, pwd_hash: &str, conn: &DbConn) -> ApiResult<()> {
    if user.password_iterations < CONFIG.password_iterations() {
        user.password_iterations = CONFIG.password_iterations();
        user.set_password(pwd_hash, None, false, None);
        user.set_password(pwd_hash, None, false, None, conn).await?;

        if let Err(e) = user.save(conn).await {
            error!("Error updating user: {e:#?}");
@@ -1335,6 +1334,11 @@ impl<'r> FromRequest<'r> for KnownDevice {

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
            // Bitwarden seems to send padded Base64 strings since 2026.2.1
            // Since these values are not streamed and Headers are always split by newlines
            // we can safely ignore padding here and remove any '=' appended.
            let email_b64 = email_b64.trim_end_matches('=');

            let Ok(email_bytes) = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) else {
                return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
            };
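// A minimal sketch of that padding-tolerant decode: stripping trailing '='
// lets both padded and unpadded base64url values decode with BASE64URL_NOPAD:
fn decode_email_header(email_b64: &str) -> Option<String> {
    let email_b64 = email_b64.trim_end_matches('=');
    let bytes = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()).ok()?;
    String::from_utf8(bytes).ok()
}

fn main() {
    // both forms decode to the same address
    assert_eq!(decode_email_header("dXNlckBleGFtcGxlLmNvbQ==").as_deref(), Some("user@example.com"));
    assert_eq!(decode_email_header("dXNlckBleGFtcGxlLmNvbQ").as_deref(), Some("user@example.com"));
}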
@@ -1434,7 +1438,7 @@ async fn put_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResul

    if let Some(device) = Device::find_by_uuid(&device_id, &conn).await {
        Device::clear_push_token_by_uuid(&device_id, &conn).await?;
        unregister_push_device(&device.push_uuid).await?;
        unregister_push_device(device.push_uuid.as_ref()).await?;
    }

    Ok(())
@@ -1704,6 +1708,6 @@ pub async fn purge_auth_requests(pool: DbPool) {
    if let Ok(conn) = pool.get().await {
        AuthRequest::purge_expired_auth_requests(&conn).await;
    } else {
        error!("Failed to get DB connection while purging trashed ciphers")
        error!("Failed to get DB connection while purging auth requests")
    }
}

+260
-68
@@ -11,17 +11,17 @@ use rocket::{
use serde_json::Value;

use crate::auth::ClientVersion;
use crate::util::{save_temp_file, NumberOrString};
use crate::util::{deser_opt_nonempty_str, save_temp_file, NumberOrString};
use crate::{
    api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
    auth::Headers,
    auth::{Headers, OrgIdGuard, OwnerHeaders},
    config::PathType,
    crypto,
    db::{
        models::{
            Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId,
            CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType,
            OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
            Archive, Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup,
            CollectionId, CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership,
            MembershipType, OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
        },
        DbConn, DbPool,
    },
@@ -86,7 +86,8 @@ pub fn routes() -> Vec<Route> {
        restore_cipher_put_admin,
        restore_cipher_selected,
        restore_cipher_selected_admin,
        delete_all,
        purge_org_vault,
        purge_personal_vault,
        move_cipher_selected,
        move_cipher_selected_put,
        put_collections2_update,
@@ -95,6 +96,10 @@ pub fn routes() -> Vec<Route> {
        post_collections_update,
        post_collections_admin,
        put_collections_admin,
        archive_cipher_put,
        archive_cipher_selected,
        unarchive_cipher_put,
        unarchive_cipher_selected,
    ]
}

@@ -247,6 +252,7 @@ pub struct CipherData {
    // Id is optional as it is included only in bulk share
    pub id: Option<CipherId>,
    // Folder id is not included in import
    #[serde(default, deserialize_with = "deser_opt_nonempty_str")]
    pub folder_id: Option<FolderId>,
    // TODO: Some of these might appear all the time, no need for Option
    #[serde(alias = "organizationID")]
@@ -291,11 +297,13 @@ pub struct CipherData {
    // when using older client versions, or if the operation doesn't involve
    // updating an existing cipher.
    last_known_revision_date: Option<String>,
    archived_date: Option<String>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PartialCipherData {
    #[serde(default, deserialize_with = "deser_opt_nonempty_str")]
    folder_id: Option<FolderId>,
    favorite: bool,
}
@@ -425,7 +433,7 @@ pub async fn update_cipher_from_data(
    let transfer_cipher = cipher.organization_uuid.is_none() && data.organization_id.is_some();

    if let Some(org_id) = data.organization_id {
        match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
        match Membership::find_confirmed_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
            None => err!("You don't have permission to add item to organization"),
            Some(member) => {
                if shared_to_collections.is_some()
@@ -531,6 +539,13 @@ pub async fn update_cipher_from_data(
    cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
    cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;

    if let Some(dt_str) = data.archived_date {
        match NaiveDateTime::parse_from_str(&dt_str, "%+") {
            Ok(dt) => cipher.set_archived_at(dt, &headers.user.uuid, conn).await?,
            Err(err) => warn!("Error parsing ArchivedDate '{dt_str}': {err}"),
        }
    }
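// "%+" is chrono's ISO 8601 / RFC 3339 format specifier, so client-supplied
// values like "2025-12-01T10:30:00.000Z" parse directly (the offset is parsed
// but then dropped for a NaiveDateTime); a quick standalone check:
use chrono::NaiveDateTime;

fn main() {
    assert!(NaiveDateTime::parse_from_str("2025-12-01T10:30:00.000Z", "%+").is_ok());
    assert!(NaiveDateTime::parse_from_str("not-a-date", "%+").is_err());
}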

    if ut != UpdateType::None {
        // Only log events for organizational ciphers
        if let Some(org_id) = &cipher.organization_uuid {
@@ -627,7 +642,7 @@ async fn post_ciphers_import(data: Json<ImportData>, headers: Headers, conn: DbC

    let mut user = headers.user;
    user.update_revision(&conn).await?;
    nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
    nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;

    Ok(())
}
@@ -715,9 +730,13 @@ async fn put_cipher_partial(
    let data: PartialCipherData = data.into_inner();

    let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
        err!("Cipher doesn't exist")
        err!("Cipher does not exist")
    };

    if !cipher.is_accessible_to_user(&headers.user.uuid, &conn).await {
        err!("Cipher does not exist", "Cipher is not accessible for the current user")
    }

    if let Some(ref folder_id) = data.folder_id {
        if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &conn).await.is_none() {
            err!("Invalid folder", "Folder does not exist or belongs to another user");
@@ -795,12 +814,16 @@ async fn post_collections_update(
        err!("Collection cannot be changed")
    }

    let Some(ref org_uuid) = cipher.organization_uuid else {
        err!("Cipher is not owned by an organization")
    };

    let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
    let current_collections =
        HashSet::<CollectionId>::from_iter(cipher.get_collections(headers.user.uuid.clone(), &conn).await);

    for collection in posted_collections.symmetric_difference(&current_collections) {
        match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
        match Collection::find_by_uuid_and_org(collection, org_uuid, &conn).await {
            None => err!("Invalid collection ID provided"),
            Some(collection) => {
                if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
@@ -831,7 +854,7 @@ async fn post_collections_update(
    log_event(
        EventType::CipherUpdatedCollections as i32,
        &cipher.uuid,
        &cipher.organization_uuid.clone().unwrap(),
        org_uuid,
        &headers.user.uuid,
        headers.device.atype,
        &headers.ip.ip,
@@ -871,12 +894,16 @@ async fn post_collections_admin(
        err!("Collection cannot be changed")
    }

    let Some(ref org_uuid) = cipher.organization_uuid else {
        err!("Cipher is not owned by an organization")
    };

    let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
    let current_collections =
        HashSet::<CollectionId>::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &conn).await);

    for collection in posted_collections.symmetric_difference(&current_collections) {
        match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
        match Collection::find_by_uuid_and_org(collection, org_uuid, &conn).await {
            None => err!("Invalid collection ID provided"),
            Some(collection) => {
                if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
@@ -907,7 +934,7 @@ async fn post_collections_admin(
    log_event(
        EventType::CipherUpdatedCollections as i32,
        &cipher.uuid,
        &cipher.organization_uuid.unwrap(),
        org_uuid,
        &headers.user.uuid,
        headers.device.atype,
        &headers.ip.ip,
@@ -998,7 +1025,7 @@ async fn put_cipher_share_selected(
    }

    // Multi share actions do not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;

    Ok(())
}
@@ -1564,6 +1591,7 @@ async fn restore_cipher_selected(
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct MoveCipherData {
    #[serde(default, deserialize_with = "deser_opt_nonempty_str")]
    folder_id: Option<FolderId>,
    ids: Vec<CipherId>,
}
@@ -1610,7 +1638,7 @@ async fn move_cipher_selected(
        .await;
    } else {
        // Multi move actions do not send out a push for each cipher, we need to send a general sync here
        nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
        nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
    }

    if cipher_count != accessible_ciphers_count {
@@ -1638,9 +1666,51 @@ struct OrganizationIdData {
    org_id: OrganizationId,
}

// Use the OrgIdGuard here, to ensure there is an organization id present.
// If there is no organization id present, it should be forwarded to purge_personal_vault.
// This guard needs to be the first argument, else OwnerHeaders will be triggered which will logout the user.
#[post("/ciphers/purge?<organization..>", data = "<data>")]
async fn delete_all(
    organization: Option<OrganizationIdData>,
async fn purge_org_vault(
    _org_id_guard: OrgIdGuard,
    organization: OrganizationIdData,
    data: Json<PasswordOrOtpData>,
    headers: OwnerHeaders,
    conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    if organization.org_id != headers.org_id {
        err!("Organization not found", "Organization id's do not match");
    }

    let data: PasswordOrOtpData = data.into_inner();
    let user = headers.user;

    data.validate(&user, true, &conn).await?;

    match Membership::find_confirmed_by_user_and_org(&user.uuid, &organization.org_id, &conn).await {
        Some(member) if member.atype == MembershipType::Owner => {
            Cipher::delete_all_by_organization(&organization.org_id, &conn).await?;
            nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;

            log_event(
                EventType::OrganizationPurgedVault as i32,
                &organization.org_id,
                &organization.org_id,
                &user.uuid,
                headers.device.atype,
                &headers.ip.ip,
                &conn,
            )
            .await;

            Ok(())
        }
        _ => err!("You don't have permission to purge the organization vault"),
    }
}
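// A minimal, self-contained sketch of the forwarding pattern the comment
// above relies on (hypothetical guard; the real code uses OrgIdGuard and
// OwnerHeaders): a Rocket request guard that forwards when the query
// parameter is missing, so the next route on the same path can match:
use rocket::http::Status;
use rocket::request::{FromRequest, Outcome, Request};

struct HasOrgId;

#[rocket::async_trait]
impl<'r> FromRequest<'r> for HasOrgId {
    type Error = ();
    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match req.query_value::<String>("organizationId") {
            Some(Ok(_)) => Outcome::Success(HasOrgId),
            // no organization id: forward instead of failing, so the
            // personal-vault route gets a chance to handle the request
            _ => Outcome::Forward(Status::NotFound),
        }
    }
}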

#[post("/ciphers/purge", data = "<data>")]
async fn purge_personal_vault(
    data: Json<PasswordOrOtpData>,
    headers: Headers,
    conn: DbConn,
@@ -1651,52 +1721,48 @@ async fn delete_all(

    data.validate(&user, true, &conn).await?;

    match organization {
        Some(org_data) => {
            // Organization ID in query params, purging organization vault
            match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await {
                None => err!("You don't have permission to purge the organization vault"),
                Some(member) => {
                    if member.atype == MembershipType::Owner {
                        Cipher::delete_all_by_organization(&org_data.org_id, &conn).await?;
                        nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;

                        log_event(
                            EventType::OrganizationPurgedVault as i32,
                            &org_data.org_id,
                            &org_data.org_id,
                            &user.uuid,
                            headers.device.atype,
                            &headers.ip.ip,
                            &conn,
                        )
                        .await;

                        Ok(())
                    } else {
                        err!("You don't have permission to purge the organization vault");
                    }
                }
            }
        }
        None => {
            // No organization ID in query params, purging user vault
            // Delete ciphers and their attachments
            for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await {
                cipher.delete(&conn).await?;
            }

            // Delete folders
            for f in Folder::find_by_user(&user.uuid, &conn).await {
                f.delete(&conn).await?;
            }

            user.update_revision(&conn).await?;
            nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;

            Ok(())
        }
    for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await {
        cipher.delete(&conn).await?;
    }

    for f in Folder::find_by_user(&user.uuid, &conn).await {
        f.delete(&conn).await?;
    }

    user.update_revision(&conn).await?;
    nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;

    Ok(())
}

#[put("/ciphers/<cipher_id>/archive")]
async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
    archive_cipher(&cipher_id, &headers, false, &conn, &nt).await
}

#[put("/ciphers/archive", data = "<data>")]
async fn archive_cipher_selected(
    data: Json<CipherIdsData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    archive_multiple_ciphers(data, &headers, &conn, &nt).await
}

#[put("/ciphers/<cipher_id>/unarchive")]
async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
    unarchive_cipher(&cipher_id, &headers, false, &conn, &nt).await
}

#[put("/ciphers/unarchive", data = "<data>")]
async fn unarchive_cipher_selected(
    data: Json<CipherIdsData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    unarchive_multiple_ciphers(data, &headers, &conn, &nt).await
}

#[derive(PartialEq)]
@@ -1789,7 +1855,7 @@ async fn _delete_multiple_ciphers(
    }

    // Multi delete actions do not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;

    Ok(())
}
@@ -1857,7 +1923,7 @@ async fn _restore_multiple_ciphers(
    }

    // Multi move actions do not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;

    Ok(Json(json!({
        "data": ciphers,
@@ -1917,6 +1983,122 @@ async fn _delete_cipher_attachment_by_id(
    Ok(Json(json!({"cipher":cipher_json})))
}

async fn archive_cipher(
    cipher_id: &CipherId,
    headers: &Headers,
    multi_archive: bool,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
        err!("Cipher doesn't exist")
    };

    if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
        err!("Cipher is not accessible for the current user")
    }

    cipher.set_archived_at(Utc::now().naive_utc(), &headers.user.uuid, conn).await?;

    if !multi_archive {
        nt.send_cipher_update(
            UpdateType::SyncCipherUpdate,
            &cipher,
            &cipher.update_users_revision(conn).await,
            &headers.device,
            None,
            conn,
        )
        .await;
    }

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}

async fn unarchive_cipher(
    cipher_id: &CipherId,
    headers: &Headers,
    multi_unarchive: bool,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
        err!("Cipher doesn't exist")
    };

    if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
        err!("Cipher is not accessible for the current user")
    }

    cipher.unarchive(&headers.user.uuid, conn).await?;

    if !multi_unarchive {
        nt.send_cipher_update(
            UpdateType::SyncCipherUpdate,
            &cipher,
            &cipher.update_users_revision(conn).await,
            &headers.device,
            None,
            conn,
        )
        .await;
    }

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}

async fn archive_multiple_ciphers(
    data: Json<CipherIdsData>,
    headers: &Headers,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let data = data.into_inner();

    let mut ciphers: Vec<Value> = Vec::new();
    for cipher_id in data.ids {
        match archive_cipher(&cipher_id, headers, true, conn, nt).await {
            Ok(json) => ciphers.push(json.into_inner()),
            err => return err,
        }
    }

    // Multi archive does not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;

    Ok(Json(json!({
        "data": ciphers,
        "object": "list",
        "continuationToken": null
    })))
}

async fn unarchive_multiple_ciphers(
    data: Json<CipherIdsData>,
    headers: &Headers,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let data = data.into_inner();

    let mut ciphers: Vec<Value> = Vec::new();
    for cipher_id in data.ids {
        match unarchive_cipher(&cipher_id, headers, true, conn, nt).await {
            Ok(json) => ciphers.push(json.into_inner()),
            err => return err,
        }
    }

    // Multi unarchive does not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;

    Ok(Json(json!({
        "data": ciphers,
        "object": "list",
        "continuationToken": null
    })))
}

/// This will hold all the necessary data to improve a full sync of all the ciphers
/// It can be used during the `Cipher::to_json()` call.
/// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed.
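// The shape of that prefetch, as a standalone sketch with simplified types:
// one query per relation, each collected into a map keyed by cipher id, so
// the per-cipher `to_json()` loop does O(1) lookups instead of extra queries:
use std::collections::HashMap;

fn index_by_cipher<V>(rows: Vec<(String, V)>) -> HashMap<String, V> {
    rows.into_iter().collect()
}

fn main() {
    let folder_rows = vec![("cipher-1".to_string(), "folder-a".to_string())];
    let cipher_folders = index_by_cipher(folder_rows);
    assert_eq!(cipher_folders.get("cipher-1").map(String::as_str), Some("folder-a"));
}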
@@ -1926,6 +2108,7 @@ pub struct CipherSyncData {
    pub cipher_folders: HashMap<CipherId, FolderId>,
    pub cipher_favorites: HashSet<CipherId>,
    pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>,
    pub cipher_archives: HashMap<CipherId, NaiveDateTime>,
    pub members: HashMap<OrganizationId, Membership>,
    pub user_collections: HashMap<CollectionId, CollectionUser>,
    pub user_collections_groups: HashMap<CollectionId, CollectionGroup>,
@@ -1942,20 +2125,25 @@ impl CipherSyncData {
    pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self {
        let cipher_folders: HashMap<CipherId, FolderId>;
        let cipher_favorites: HashSet<CipherId>;
        let cipher_archives: HashMap<CipherId, NaiveDateTime>;
        match sync_type {
            // User Sync supports Folders and Favorites
            // User Sync supports Folders, Favorites, and Archives
            CipherSyncType::User => {
                // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
                cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect();

                // Generate a HashSet of all the Cipher UUID's which are marked as favorite
                cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect();

                // Generate a HashMap with the Cipher UUID as key and the archived date time as value
                cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect();
            }
            // Organization Sync does not support Folders and Favorites.
            // Organization Sync does not support Folders, Favorites, or Archives.
            // If these are set, it will cause issues in the web-vault.
            CipherSyncType::Organization => {
                cipher_folders = HashMap::with_capacity(0);
                cipher_favorites = HashSet::with_capacity(0);
                cipher_archives = HashMap::with_capacity(0);
            }
        }

@@ -1976,8 +2164,11 @@ impl CipherSyncData {
        }

        // Generate a HashMap with the Organization UUID as key and the Membership record
        let members: HashMap<OrganizationId, Membership> =
            Membership::find_by_user(user_id, conn).await.into_iter().map(|m| (m.org_uuid.clone(), m)).collect();
        let members: HashMap<OrganizationId, Membership> = Membership::find_confirmed_by_user(user_id, conn)
            .await
            .into_iter()
            .map(|m| (m.org_uuid.clone(), m))
            .collect();

        // Generate a HashMap with the User_Collections UUID as key and the CollectionUser record
        let user_collections: HashMap<CollectionId, CollectionUser> = CollectionUser::find_by_user(user_id, conn)
@@ -2015,6 +2206,7 @@ impl CipherSyncData {
        };

        Self {
            cipher_archives,
            cipher_attachments,
            cipher_folders,
            cipher_favorites,

@@ -653,7 +653,7 @@ async fn password_emergency_access(
    };

    // change grantor_user password
    grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
    grantor_user.set_password(new_master_password_hash, Some(data.key), true, None, &conn).await?;
    grantor_user.save(&conn).await?;

    // Disable TwoFactor providers since they will otherwise block logins

@@ -240,7 +240,7 @@ async fn _log_user_event(
    ip: &IpAddr,
    conn: &DbConn,
) {
    let memberships = Membership::find_by_user(user_id, conn).await;
    let memberships = Membership::find_confirmed_by_user(user_id, conn).await;
    let mut events: Vec<Event> = Vec::with_capacity(memberships.len() + 1); // We need an event per org and one without an org

    // Upstream saves the event also without any org_id.

@@ -8,6 +8,7 @@ use crate::{
        models::{Folder, FolderId},
        DbConn,
    },
    util::deser_opt_nonempty_str,
};

pub fn routes() -> Vec<rocket::Route> {
@@ -38,6 +39,7 @@ async fn get_folder(folder_id: FolderId, headers: Headers, conn: DbConn) -> Json
#[serde(rename_all = "camelCase")]
pub struct FolderData {
    pub name: String,
    #[serde(default, deserialize_with = "deser_opt_nonempty_str")]
    pub id: Option<FolderId>,
}

+16
-17
@@ -59,7 +59,8 @@ use crate::{
    error::Error,
    http_client::make_http_request,
    mail,
    util::parse_experimental_client_feature_flags,
    util::{parse_experimental_client_feature_flags, FeatureFlagFilter},
    CONFIG,
};

#[derive(Debug, Serialize, Deserialize)]
@@ -123,7 +124,7 @@ async fn post_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: Db

    user.save(&conn).await?;

    nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &conn).await;
    nt.send_user_update(UpdateType::SyncSettings, &user, headers.device.push_uuid.as_ref(), &conn).await;

    Ok(Json(json!({})))
}
@@ -136,7 +137,7 @@ async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbC
#[get("/hibp/breach?<username>")]
async fn hibp_breach(username: &str, _headers: Headers) -> JsonResult {
    let username: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();
    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
    if let Some(api_key) = CONFIG.hibp_api_key() {
        let url = format!(
            "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
        );
@@ -197,19 +198,17 @@ fn get_api_webauthn(_headers: Headers) -> Json<Value> {

#[get("/config")]
fn config() -> Json<Value> {
    let domain = crate::CONFIG.domain();
    let domain = CONFIG.domain();
    // Official available feature flags can be found here:
    // Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
    // Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
    // Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
    // iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
    let mut feature_states =
        parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
    feature_states.insert("duo-redirect".to_string(), true);
    feature_states.insert("email-verification".to_string(), true);
    feature_states.insert("unauth-ui-refresh".to_string(), true);
    feature_states.insert("enable-pm-flight-recorder".to_string(), true);
    feature_states.insert("mobile-error-reporting".to_string(), true);
    // Server (v2026.2.1): https://github.com/bitwarden/server/blob/0e42725d0837bd1c0dabd864ff621a579959744b/src/Core/Constants.cs#L135
    // Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12
    // Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31
    // iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
    let mut feature_states = parse_experimental_client_feature_flags(
        &CONFIG.experimental_client_feature_flags(),
        FeatureFlagFilter::ValidOnly,
    );
    feature_states.insert("pm-19148-innovation-archive".to_string(), true);

    Json(json!({
        // Note: The clients use this version to handle backwards compatibility concerns
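// A sketch of the parse/filter split introduced above (the signature is
// assumed from the call sites here and in admin.rs; the real helper lives in
// src/util.rs and knows the valid flag list itself):
use std::collections::HashMap;

enum FeatureFlagFilter {
    ValidOnly,
    InvalidOnly,
}

fn parse_experimental_client_feature_flags(
    raw: &str,
    known_flags: &[&str],
    filter: FeatureFlagFilter,
) -> HashMap<String, bool> {
    raw.split(',')
        .map(str::trim)
        .filter(|f| !f.is_empty())
        .filter(|f| match filter {
            FeatureFlagFilter::ValidOnly => known_flags.contains(f),
            FeatureFlagFilter::InvalidOnly => !known_flags.contains(f),
        })
        .map(|f| (f.to_string(), true))
        .collect()
}

fn main() {
    let known = ["ssh-agent", "export-attachments"];
    let cfg = "ssh-agent, not-a-real-flag";
    // /api/config only advertises recognized flags...
    assert_eq!(parse_experimental_client_feature_flags(cfg, &known, FeatureFlagFilter::ValidOnly).len(), 1);
    // ...while /admin/diagnostics surfaces the unrecognized ones
    assert_eq!(parse_experimental_client_feature_flags(cfg, &known, FeatureFlagFilter::InvalidOnly).len(), 1);
}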
@@ -225,7 +224,7 @@ fn config() -> Json<Value> {
        "url": "https://github.com/dani-garcia/vaultwarden"
    },
    "settings": {
        "disableUserRegistration": crate::CONFIG.is_signup_disabled()
        "disableUserRegistration": CONFIG.is_signup_disabled()
    },
    "environment": {
        "vault": domain,
@@ -278,7 +277,7 @@ async fn accept_org_invite(

    member.save(conn).await?;

    if crate::CONFIG.mail_enabled() {
    if CONFIG.mail_enabled() {
        let org = match Organization::find_by_uuid(&member.org_uuid, conn).await {
            Some(org) => org,
            None => err!("Organization not found."),

+219
-500
File diff suppressed because it is too large
@@ -156,7 +156,7 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, conn: DbConn
            }
        };

        GroupUser::delete_all_by_group(&group_uuid, &conn).await?;
        GroupUser::delete_all_by_group(&group_uuid, &org_id, &conn).await?;

        for ext_id in &group_data.member_external_ids {
            if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &conn).await {

@@ -574,7 +574,7 @@ async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Re

        Ok(format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host))
    } else {
        Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
        Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_mins(5)).await?.uri().to_string())
    }
}

@@ -7,10 +7,10 @@ use crate::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, PasswordOrOtpData,
    },
    auth::Headers,
    auth::{ClientHeaders, Headers},
    crypto,
    db::{
        models::{DeviceId, EventType, TwoFactor, TwoFactorType, User, UserId},
        models::{AuthRequest, AuthRequestId, DeviceId, EventType, TwoFactor, TwoFactorType, User, UserId},
        DbConn,
    },
    error::{Error, MapResult},
@@ -30,35 +30,63 @@ struct SendEmailLoginData {
    email: Option<String>,
    #[serde(alias = "MasterPasswordHash")]
    master_password_hash: Option<String>,
    auth_request_id: Option<AuthRequestId>,
    auth_request_access_code: Option<String>,
}

/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
async fn send_email_login(data: Json<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
async fn send_email_login(data: Json<SendEmailLoginData>, client_headers: ClientHeaders, conn: DbConn) -> EmptyResult {
    let data: SendEmailLoginData = data.into_inner();

    if !CONFIG._enable_email_2fa() {
        err!("Email 2FA is disabled")
    }

    // Ratelimit the login
    crate::ratelimit::check_limit_login(&client_headers.ip.ip)?;

    // Get the user
    let email = match &data.email {
        Some(email) if !email.is_empty() => Some(email),
        _ => None,
    };
    let user = if let Some(email) = email {
        let Some(master_password_hash) = &data.master_password_hash else {
            err!("No password hash has been submitted.")
        };
    let master_password_hash = match &data.master_password_hash {
        Some(password_hash) if !password_hash.is_empty() => Some(password_hash),
        _ => None,
    };
    let auth_request_id = match &data.auth_request_id {
        Some(auth_request_id) if !auth_request_id.is_empty() => Some(auth_request_id),
        _ => None,
    };

    let user = if let Some(email) = email {
        let Some(user) = User::find_by_mail(email, &conn).await else {
            err!("Username or password is incorrect. Try again.")
        };

        // Check password
        if !user.check_valid_password(master_password_hash) {
            err!("Username or password is incorrect. Try again.")
        if let Some(master_password_hash) = master_password_hash {
            // Check password
            if !user.check_valid_password(master_password_hash) {
                err!("Username or password is incorrect. Try again.")
            }
        } else if let Some(auth_request_id) = auth_request_id {
            let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_id, &conn).await else {
                err!("AuthRequest doesn't exist", "User not found")
            };
            let Some(code) = &data.auth_request_access_code else {
                err!("no auth request access code")
            };

            if auth_request.device_type != client_headers.device_type
                || auth_request.request_ip != client_headers.ip.ip.to_string()
                || !auth_request.check_access_code(code)
            {
                err!("AuthRequest doesn't exist", "Invalid device, IP or code")
            }
        } else {
            err!("No password hash has been submitted.")
        }

        user

@@ -1,7 +1,9 @@
use chrono::{TimeDelta, Utc};
use data_encoding::BASE32;
use num_traits::FromPrimitive;
use rocket::serde::json::Json;
use rocket::Route;
use serde::Deserialize;
use serde_json::Value;

use crate::{
@@ -9,12 +11,12 @@ use crate::{
        core::{log_event, log_user_event},
        EmptyResult, JsonResult, PasswordOrOtpData,
    },
    auth::{ClientHeaders, Headers},
    auth::Headers,
    crypto,
    db::{
        models::{
            DeviceType, EventType, Membership, MembershipType, OrgPolicyType, Organization, OrganizationId, TwoFactor,
            TwoFactorIncomplete, User, UserId,
            TwoFactorIncomplete, TwoFactorType, User, UserId,
        },
        DbConn, DbPool,
    },
@@ -31,11 +33,47 @@ pub mod protected_actions;
pub mod webauthn;
pub mod yubikey;

fn has_global_duo_credentials() -> bool {
    CONFIG._enable_duo() && CONFIG.duo_host().is_some() && CONFIG.duo_ikey().is_some() && CONFIG.duo_skey().is_some()
}

pub fn is_twofactor_provider_usable(provider_type: TwoFactorType, provider_data: Option<&str>) -> bool {
    #[derive(Deserialize)]
    struct DuoProviderData {
        host: String,
        ik: String,
        sk: String,
    }

    match provider_type {
        TwoFactorType::Authenticator => true,
        TwoFactorType::Email => CONFIG._enable_email_2fa(),
        TwoFactorType::Duo | TwoFactorType::OrganizationDuo => {
            provider_data
                .and_then(|raw| serde_json::from_str::<DuoProviderData>(raw).ok())
                .is_some_and(|duo| !duo.host.is_empty() && !duo.ik.is_empty() && !duo.sk.is_empty())
                || has_global_duo_credentials()
        }
        TwoFactorType::YubiKey => {
            CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some()
        }
        TwoFactorType::Webauthn => CONFIG.is_webauthn_2fa_supported(),
        TwoFactorType::Remember => !CONFIG.disable_2fa_remember(),
        TwoFactorType::RecoveryCode => true,
        TwoFactorType::U2f
        | TwoFactorType::U2fRegisterChallenge
        | TwoFactorType::U2fLoginChallenge
        | TwoFactorType::EmailVerificationChallenge
        | TwoFactorType::WebauthnRegisterChallenge
        | TwoFactorType::WebauthnLoginChallenge
        | TwoFactorType::ProtectedActions => false,
    }
}

pub fn routes() -> Vec<Route> {
    let mut routes = routes![
        get_twofactor,
        get_recover,
        recover,
        disable_twofactor,
        disable_twofactor_put,
        get_device_verification_settings,
@@ -54,7 +92,13 @@ pub fn routes() -> Vec<Route> {
#[get("/two-factor")]
async fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await;
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
    let twofactors_json: Vec<Value> = twofactors
        .iter()
        .filter_map(|tf| {
            let provider_type = TwoFactorType::from_i32(tf.atype)?;
            is_twofactor_provider_usable(provider_type, Some(&tf.data)).then(|| TwoFactor::to_json_provider(tf))
        })
        .collect();

    Json(json!({
        "data": twofactors_json,
@@ -76,54 +120,6 @@ async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, conn: DbCo
    })))
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RecoverTwoFactor {
    master_password_hash: String,
    email: String,
    recovery_code: String,
}

#[post("/two-factor/recover", data = "<data>")]
async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, conn: DbConn) -> JsonResult {
    let data: RecoverTwoFactor = data.into_inner();

    use crate::db::models::User;

    // Get the user
    let Some(mut user) = User::find_by_mail(&data.email, &conn).await else {
        err!("Username or password is incorrect. Try again.")
    };

    // Check password
    if !user.check_valid_password(&data.master_password_hash) {
        err!("Username or password is incorrect. Try again.")
    }

    // Check if recovery code is correct
    if !user.check_valid_recovery_code(&data.recovery_code) {
        err!("Recovery code is incorrect. Try again.")
    }

    // Remove all twofactors from the user
    TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
    enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &conn).await?;

    log_user_event(
        EventType::UserRecovered2fa as i32,
        &user.uuid,
        client_headers.device_type,
        &client_headers.ip.ip,
        &conn,
    )
    .await;

    // Remove the recovery code, not needed without twofactors
    user.totp_recover = None;
    user.save(&conn).await?;
    Ok(Json(Value::Object(serde_json::Map::new())))
}

async fn _generate_recover_code(user: &mut User, conn: &DbConn) {
    if user.totp_recover.is_none() {
        let totp_recover = crypto::encode_random_bytes::<20>(&BASE32);

|
||||
let webauthn = WebauthnBuilder::new(&rp_id, &rp_origin)
|
||||
.expect("Creating WebauthnBuilder failed")
|
||||
.rp_name(&domain)
|
||||
.timeout(Duration::from_millis(60000));
|
||||
.timeout(Duration::from_mins(1));
|
||||
|
||||
webauthn.build().expect("Building Webauthn failed")
|
||||
});
|
||||
@@ -108,8 +108,8 @@ impl WebauthnRegistration {
|
||||
|
||||
#[post("/two-factor/get-webauthn", data = "<data>")]
|
||||
async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
if !CONFIG.domain_set() {
|
||||
err!("`DOMAIN` environment variable is not set. Webauthn disabled")
|
||||
if !CONFIG.is_webauthn_2fa_supported() {
|
||||
err!("Configured `DOMAIN` is not compatible with Webauthn")
|
||||
}
|
||||
|
||||
let data: PasswordOrOtpData = data.into_inner();
|
||||
@@ -144,7 +144,7 @@ async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Hea
|
||||
let (mut challenge, state) = WEBAUTHN.start_passkey_registration(
|
||||
Uuid::from_str(&user.uuid).expect("Failed to parse UUID"), // Should never fail
|
||||
&user.email,
|
||||
&user.name,
|
||||
user.display_name(),
|
||||
Some(registrations),
|
||||
)?;
|
||||
|
||||
@@ -438,7 +438,7 @@ pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &Db
|
||||
// We need to check for and update the backup_eligible flag when needed.
|
||||
// Vaultwarden did not have knowledge of this flag prior to migrating to webauthn-rs v0.5.x
|
||||
// Because of this we check the flag at runtime and update the registrations and state when needed
|
||||
check_and_update_backup_eligible(user_id, &rsp, &mut registrations, &mut state, conn).await?;
|
||||
let backup_flags_updated = check_and_update_backup_eligible(&rsp, &mut registrations, &mut state)?;
|
||||
|
||||
let authentication_result = WEBAUTHN.finish_passkey_authentication(&rsp, &state)?;
|
||||
|
||||
@@ -446,7 +446,8 @@ pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &Db
|
||||
if ct_eq(reg.credential.cred_id(), authentication_result.cred_id()) {
|
||||
// If the cred id matches and the credential is updated, Some(true) is returned
|
||||
// In those cases, update the record, else leave it alone
|
||||
if reg.credential.update_credential(&authentication_result) == Some(true) {
|
||||
let credential_updated = reg.credential.update_credential(&authentication_result) == Some(true);
|
||||
if credential_updated || backup_flags_updated {
|
||||
TwoFactor::new(user_id.clone(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?)
|
||||
.save(conn)
|
||||
.await?;
|
||||
@@ -463,13 +464,11 @@ pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &Db
|
||||
)
|
||||
}
|
||||
|
||||
async fn check_and_update_backup_eligible(
|
||||
user_id: &UserId,
|
||||
fn check_and_update_backup_eligible(
|
||||
rsp: &PublicKeyCredential,
|
||||
registrations: &mut Vec<WebauthnRegistration>,
|
||||
state: &mut PasskeyAuthentication,
|
||||
conn: &DbConn,
|
||||
) -> EmptyResult {
|
||||
) -> Result<bool, Error> {
|
||||
// The feature flags from the response
|
||||
// For details see: https://www.w3.org/TR/webauthn-3/#sctn-authenticator-data
|
||||
const FLAG_BACKUP_ELIGIBLE: u8 = 0b0000_1000;
|
||||
@@ -486,16 +485,7 @@ async fn check_and_update_backup_eligible(
|
||||
let rsp_id = rsp.raw_id.as_slice();
|
||||
for reg in &mut *registrations {
|
||||
if ct_eq(reg.credential.cred_id().as_slice(), rsp_id) {
|
||||
// Try to update the key, and if needed also update the database, before the actual state check is done
|
||||
if reg.set_backup_eligible(backup_eligible, backup_state) {
|
||||
TwoFactor::new(
|
||||
user_id.clone(),
|
||||
TwoFactorType::Webauthn,
|
||||
serde_json::to_string(®istrations)?,
|
||||
)
|
||||
.save(conn)
|
||||
.await?;
|
||||
|
||||
// We also need to adjust the current state which holds the challenge used to start the authentication verification
|
||||
// Because Vaultwarden supports multiple keys, we need to loop through the deserialized state and check which key to update
|
||||
let mut raw_state = serde_json::to_value(&state)?;
|
||||
@@ -517,11 +507,12 @@ async fn check_and_update_backup_eligible(
|
||||
}
|
||||
|
||||
*state = serde_json::from_value(raw_state)?;
|
||||
return Ok(true);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
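Returning `Ok(bool)` lets the caller persist the registrations once, whether the credential counter or the backup flags changed. The constant above reads bit 3 (BE) of the WebAuthn authenticator-data flags byte; a minimal sketch of extracting both backup bits per the W3C spec linked in the hunk (the BS constant is my addition and does not appear in the diff):

```rust
// Bit layout per https://www.w3.org/TR/webauthn-3/#sctn-authenticator-data
const FLAG_BACKUP_ELIGIBLE: u8 = 0b0000_1000; // BE, bit 3
const FLAG_BACKUP_STATE: u8 = 0b0001_0000; // BS, bit 4 (assumed here; not part of the hunk above)

fn backup_bits(flags: u8) -> (bool, bool) {
    (flags & FLAG_BACKUP_ELIGIBLE != 0, flags & FLAG_BACKUP_STATE != 0)
}

fn main() {
    // UP (bit 0), UV (bit 2), BE and BS all set
    assert_eq!(backup_bits(0b0001_1101), (true, true));
    // UP and UV only: a non-syncing credential
    assert_eq!(backup_bits(0b0000_0101), (false, false));
}
```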
+57
-79
@@ -19,7 +19,7 @@ use svg_hush::{data_url_filter, Filter};
use crate::{
    config::PathType,
    error::Error,
    http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
    http_client::{get_reqwest_client_builder, get_valid_host, should_block_host, CustomHttpClientError},
    util::Cached,
    CONFIG,
};
@@ -81,19 +81,19 @@ static ICON_SIZE_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(?x)(\d+
// The function name `icon_external` is checked in the `on_response` function in `AppHeaders`
// It is used to prevent sending a specific header which breaks icon downloads.
// If this function needs to be renamed, also adjust the code in `util.rs`
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Cached<Option<Redirect>> {
    if !is_valid_domain(domain) {
        warn!("Invalid domain: {domain}");
#[get("/<host>/icon.png")]
fn icon_external(host: &str) -> Cached<Option<Redirect>> {
    let Ok(host) = get_valid_host(host) else {
        warn!("Invalid host: {host}");
        return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
    };

    if should_block_host(&host).is_err() {
        warn!("Blocked address: {host}");
        return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
    }

    if should_block_address(domain) {
        warn!("Blocked address: {domain}");
        return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
    }

    let url = CONFIG._icon_service_url().replace("{}", domain);
    let url = CONFIG._icon_service_url().replace("{}", &host.to_string());
    let redir = match CONFIG.icon_redirect_code() {
        301 => Some(Redirect::moved(url)), // legacy permanent redirect
        302 => Some(Redirect::found(url)), // legacy temporary redirect
@@ -107,12 +107,21 @@ fn icon_external(domain: &str) -> Cached<Option<Redirect>> {
    Cached::ttl(redir, CONFIG.icon_cache_ttl(), true)
}

#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
#[get("/<host>/icon.png")]
async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

    if !is_valid_domain(domain) {
        warn!("Invalid domain: {domain}");
    let Ok(host) = get_valid_host(host) else {
        warn!("Invalid host: {host}");
        return Cached::ttl(
            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
            true,
        );
    };

    if should_block_host(&host).is_err() {
        warn!("Blocked address: {host}");
        return Cached::ttl(
            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
@@ -120,16 +129,7 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
        );
    }

    if should_block_address(domain) {
        warn!("Blocked address: {domain}");
        return Cached::ttl(
            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
            true,
        );
    }

    match get_icon(domain).await {
    match get_icon(&host.to_string()).await {
        Some((icon, icon_type)) => {
            Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
        }
@@ -137,42 +137,6 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
    }
}

/// Returns if the domain provided is valid or not.
///
/// This does some manual checks and makes use of Url to do some basic checking.
/// domains can't be larger than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool {
    const ALLOWED_CHARS: &str = "-.";

    // If parsing the domain fails using Url, it will not work with reqwest.
    if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
        debug!("Domain parse error: '{domain}' - {parse_error:?}");
        return false;
    } else if domain.is_empty()
        || domain.contains("..")
        || domain.starts_with('.')
        || domain.starts_with('-')
        || domain.ends_with('-')
    {
        debug!(
            "Domain validation error: '{domain}' is either empty, contains '..', starts with an '.', starts or ends with a '-'"
        );
        return false;
    } else if domain.len() > 255 {
        debug!("Domain validation error: '{domain}' exceeds 255 characters");
        return false;
    }

    for c in domain.chars() {
        if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
            debug!("Domain validation error: '{domain}' contains an invalid character '{c}'");
            return false;
        }
    }

    true
}

async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
    let path = format!("{domain}.png");

@@ -367,7 +331,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
        tld = domain_parts.next_back().unwrap(),
        base = domain_parts.next_back().unwrap()
    );
    if is_valid_domain(&base_domain) {
    if get_valid_host(&base_domain).is_ok() {
        let sslbase = format!("https://{base_domain}");
        let httpbase = format!("http://{base_domain}");
        debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
@@ -378,7 +342,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    // When the domain is not an IP, and has less than 2 dots, try to add www. in front of it.
    } else if is_ip.is_err() && domain.matches('.').count() < 2 {
        let www_domain = format!("www.{domain}");
        if is_valid_domain(&www_domain) {
        if get_valid_host(&www_domain).is_ok() {
            let sslwww = format!("https://{www_domain}");
            let httpwww = format!("http://{www_domain}");
            debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
@@ -513,13 +477,11 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {

    if !sizes.is_empty() {
        match ICON_SIZE_REGEX.captures(sizes.trim()) {
            None => {}
            Some(dimensions) => {
                if dimensions.len() >= 3 {
                    width = dimensions[1].parse::<u16>().unwrap_or_default();
                    height = dimensions[2].parse::<u16>().unwrap_or_default();
                }
            Some(dimensions) if dimensions.len() >= 3 => {
                width = dimensions[1].parse::<u16>().unwrap_or_default();
                height = dimensions[2].parse::<u16>().unwrap_or_default();
            }
            _ => {}
        }
    }

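The match guard folds the former nested length check into the pattern itself. A runnable sketch of the same logic, assuming `ICON_SIZE_REGEX` has the shape shown in the hunk header above (two digit groups separated by non-digits):

```rust
use regex::Regex;

fn parse_sizes(sizes: &str) -> (u16, u16) {
    let re = Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap();
    match re.captures(sizes.trim()) {
        // guard replaces the former nested `if dimensions.len() >= 3`
        Some(dims) if dims.len() >= 3 => (
            dims[1].parse().unwrap_or_default(),
            dims[2].parse().unwrap_or_default(),
        ),
        _ => (0, 0),
    }
}

fn main() {
    assert_eq!(parse_sizes("32x32"), (32, 32)); // typical `sizes` attribute value
    assert_eq!(parse_sizes("any"), (0, 0)); // no digits: fall back to zero
}
```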
@@ -534,7 +496,8 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {

    use data_url::DataUrl;

    for icon in icon_result.iconlist.iter().take(5) {
    let mut icons = icon_result.iconlist.iter().take(5).peekable();
    while let Some(icon) = icons.next() {
        if icon.href.starts_with("data:image") {
            let Ok(datauri) = DataUrl::process(&icon.href) else {
                continue;
@@ -562,11 +525,23 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                _ => debug!("Extracted icon from data:image uri is invalid"),
            };
        } else {
            let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
            debug!("Trying {}", icon.href);
            // Make sure all icons are checked before returning an error
            let res = match get_page_with_referer(&icon.href, &icon_result.referer).await {
                Ok(r) => r,
                Err(e) if icons.peek().is_none() => return Err(e),
                Err(e) if CustomHttpClientError::downcast_ref(&e).is_some() => return Err(e), // If blacklisted stop immediately instead of checking the rest of the icons. see explanation and actual handling inside get_icon()
                Err(e) => {
                    warn!("Unable to download icon: {e:?}");

                    // Continue to next icon
                    continue;
                }
            };

            buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)

            // Check if the icon type is allowed, else try an icon from the list.
            // Check if the icon type is allowed, else try another icon from the list.
            icon_type = get_icon_type(&buffer);
            if icon_type.is_none() {
                buffer.clear();
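`peekable()` is what lets the loop distinguish "this candidate failed, try the next" from "this was the last candidate, surface the error". A stripped-down sketch of that control flow with a generic fetch closure (all names here are illustrative):

```rust
fn fetch_first<T, E>(candidates: &[&str], fetch: impl Fn(&str) -> Result<T, E>) -> Result<T, E> {
    // precondition: `candidates` is non-empty
    let mut iter = candidates.iter().peekable();
    loop {
        let candidate = iter.next().expect("candidates must be non-empty");
        match fetch(candidate) {
            Ok(v) => return Ok(v),
            // nothing left to try: return this error instead of swallowing it
            Err(e) if iter.peek().is_none() => return Err(e),
            // more candidates remain: log-and-continue in the real code
            Err(_) => continue,
        }
    }
}

fn main() {
    let result = fetch_first(&["bad", "good"], |c| if c == "good" { Ok(c.len()) } else { Err("nope") });
    assert_eq!(result, Ok(4));
}
```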
@@ -620,14 +595,17 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
        None
    }

    // Some details can be found here:
    // - https://www.garykessler.net/library/file_sigs_GCK_latest.html
    // - https://en.wikipedia.org/wiki/List_of_file_signatures
    match bytes {
        [137, 80, 78, 71, ..] => Some("png"),
        [0, 0, 1, 0, ..] => Some("x-icon"),
        [82, 73, 70, 70, ..] => Some("webp"),
        [255, 216, 255, ..] => Some("jpeg"),
        [71, 73, 70, 56, ..] => Some("gif"),
        [66, 77, ..] => Some("bmp"),
        [60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
        [137, 80, 78, 71, 13, 10, 26, 10, ..] => Some("png"),
        [0, 0, 1, 0, n1, n2, ..] if u16::from_le_bytes([*n1, *n2]) > 0 => Some("x-icon"), // https://en.wikipedia.org/wiki/ICO_(file_format)
        [82, 73, 70, 70, _, _, _, _, 87, 69, 66, 80, ..] => Some("webp"), // Only match WebP Images
        [255, 216, 255, b, ..] if *b >= 0xC0 => Some("jpeg"),
        [71, 73, 70, 56, 55 | 57, 97, ..] => Some("gif"),
        [66, 77, _, _, _, _, 0, 0, 0, 0, ..] => Some("bmp"), // https://en.wikipedia.org/wiki/BMP_file_format
        [60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
        [60, 63, 120, 109, 108, ..] => check_svg_after_xml_declaration(bytes), // An svg starting with <?xml
        _ => None,
    }

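The tightened patterns trade a little verbosity for far fewer false positives: the full 8-byte PNG magic is now required, and a RIFF container only counts as WebP when bytes 8..12 spell "WEBP". A small self-contained check of those two arms (the helper name is mine):

```rust
fn sniff(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [137, 80, 78, 71, 13, 10, 26, 10, ..] => Some("png"),
        [82, 73, 70, 70, _, _, _, _, 87, 69, 66, 80, ..] => Some("webp"),
        _ => None,
    }
}

fn main() {
    assert_eq!(sniff(b"\x89PNG\r\n\x1a\n\x00\x00\x00\x0d"), Some("png"));
    // a RIFF/WAVE audio file no longer sniffs as an image
    assert_eq!(sniff(b"RIFF\x24\x00\x00\x00WAVE"), None);
}
```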
+210
-65
@@ -2,7 +2,7 @@ use chrono::Utc;
use num_traits::FromPrimitive;
use rocket::{
    form::{Form, FromForm},
    http::Status,
    http::{Cookie, CookieJar, SameSite},
    response::Redirect,
    serde::json::Json,
    Route,
@@ -12,16 +12,20 @@ use serde_json::Value;
use crate::{
    api::{
        core::{
            accounts::{PreloginData, RegisterData, _prelogin, _register, kdf_upgrade},
            accounts::{_prelogin, _register, kdf_upgrade, PreloginData, RegisterData},
            log_user_event,
            two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
            two_factor::{
                authenticator, duo, duo_oidc, email, enforce_2fa_policy, is_twofactor_provider_usable, webauthn,
                yubikey,
            },
        },
        master_password_policy,
        push::register_push_device,
        ApiResult, EmptyResult, JsonResult,
    },
    auth,
    auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion},
    auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion, Secure},
    crypto,
    db::{
        models::{
            AuthRequest, AuthRequestId, Device, DeviceId, EventType, Invitation, OIDCCodeWrapper, OrganizationApiKey,
@@ -39,6 +43,7 @@ pub fn routes() -> Vec<Route> {
    routes![
        login,
        prelogin,
        prelogin_password,
        identity_register,
        register_verification_email,
        register_finish,
@@ -62,43 +67,43 @@ async fn login(

    let login_result = match data.grant_type.as_ref() {
        "refresh_token" => {
            _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
            _check_is_some(data.refresh_token.as_ref(), "refresh_token cannot be blank")?;
            _refresh_login(data, &conn, &client_header.ip).await
        }
        "password" if CONFIG.sso_enabled() && CONFIG.sso_only() => err!("SSO sign-in is required"),
        "password" => {
            _check_is_some(&data.client_id, "client_id cannot be blank")?;
            _check_is_some(&data.password, "password cannot be blank")?;
            _check_is_some(&data.scope, "scope cannot be blank")?;
            _check_is_some(&data.username, "username cannot be blank")?;
            _check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
            _check_is_some(data.password.as_ref(), "password cannot be blank")?;
            _check_is_some(data.scope.as_ref(), "scope cannot be blank")?;
            _check_is_some(data.username.as_ref(), "username cannot be blank")?;

            _check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
            _check_is_some(&data.device_name, "device_name cannot be blank")?;
            _check_is_some(&data.device_type, "device_type cannot be blank")?;
            _check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
            _check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
            _check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;

            _password_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await
            _password_login(data, &mut user_id, &conn, &client_header.ip, client_version.as_ref()).await
        }
        "client_credentials" => {
            _check_is_some(&data.client_id, "client_id cannot be blank")?;
            _check_is_some(&data.client_secret, "client_secret cannot be blank")?;
            _check_is_some(&data.scope, "scope cannot be blank")?;
            _check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
            _check_is_some(data.client_secret.as_ref(), "client_secret cannot be blank")?;
            _check_is_some(data.scope.as_ref(), "scope cannot be blank")?;

            _check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
            _check_is_some(&data.device_name, "device_name cannot be blank")?;
            _check_is_some(&data.device_type, "device_type cannot be blank")?;
            _check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
            _check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
            _check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;

            _api_key_login(data, &mut user_id, &conn, &client_header.ip).await
        }
        "authorization_code" if CONFIG.sso_enabled() => {
            _check_is_some(&data.client_id, "client_id cannot be blank")?;
            _check_is_some(&data.code, "code cannot be blank")?;
            _check_is_some(&data.code_verifier, "code verifier cannot be blank")?;
            _check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
            _check_is_some(data.code.as_ref(), "code cannot be blank")?;
            _check_is_some(data.code_verifier.as_ref(), "code verifier cannot be blank")?;

            _check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
            _check_is_some(&data.device_name, "device_name cannot be blank")?;
            _check_is_some(&data.device_type, "device_type cannot be blank")?;
            _check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
            _check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
            _check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;

            _sso_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await
            _sso_login(data, &mut user_id, &conn, &client_header.ip, client_version.as_ref()).await
        }
        "authorization_code" => err!("SSO sign-in is not available"),
        t => err!("Invalid type", t),
@@ -128,12 +133,14 @@ async fn login(
    login_result
}

// Return Status::Unauthorized to trigger logout
async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> JsonResult {
    // Extract token
    let refresh_token = match data.refresh_token {
        Some(token) => token,
        None => err_code!("Missing refresh_token", Status::Unauthorized.code),
    // When a refresh token is invalid or missing we need to respond with an HTTP BadRequest (400)
    // It also needs to return a json which holds at least a key `error` with the value `invalid_grant`
    // See the link below for details
    // https://github.com/bitwarden/clients/blob/2ee158e720a5e7dbe3641caf80b569e97a1dd91b/libs/common/src/services/api.service.ts#L1786-L1797

    let Some(refresh_token) = data.refresh_token else {
        err_json!(json!({"error": "invalid_grant"}), "Missing refresh_token")
    };

    // ---
@@ -144,7 +151,10 @@ async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> Json
    // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
    match auth::refresh_tokens(ip, &refresh_token, data.client_id, conn).await {
        Err(err) => {
            err_code!(format!("Unable to refresh login credentials: {}", err.message()), Status::Unauthorized.code)
            err_json!(
                json!({"error": "invalid_grant"}),
                format!("Unable to refresh login credentials: {}", err.message())
            )
        }
        Ok((mut device, auth_tokens)) => {
            // Save to update `device.updated_at` to track usage and toggle new status
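Per the linked bitwarden/clients handling, a failed refresh must be an HTTP 400 whose JSON body carries `error: "invalid_grant"`; the old 401 made clients react differently. A minimal sketch of that body plus the optional RFC 6749 §5.2 detail field (the `error_description` key is my addition; the hunks above only set `error`):

```rust
use serde_json::{json, Value};

fn invalid_grant_body(description: Option<&str>) -> Value {
    let mut body = json!({ "error": "invalid_grant" });
    if let Some(desc) = description {
        // optional human-readable detail, per RFC 6749 §5.2 (not emitted by the diff above)
        body["error_description"] = Value::String(desc.to_string());
    }
    body
}

fn main() {
    println!("{}", invalid_grant_body(Some("Missing refresh_token")));
}
```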
@@ -169,7 +179,7 @@ async fn _sso_login(
    user_id: &mut Option<UserId>,
    conn: &DbConn,
    ip: &ClientIp,
    client_version: &Option<ClientVersion>,
    client_version: Option<&ClientVersion>,
) -> JsonResult {
    AuthMethod::Sso.check_scope(data.scope.as_ref())?;

@@ -220,7 +230,33 @@ async fn _sso_login(
                }
            )
        }
        Some((user, None)) => Some((user, None)),
        Some((user, None)) => match user_infos.email_verified {
            None if !CONFIG.sso_allow_unknown_email_verification() => {
                error!(
                    "Login failure ({}), existing non SSO user ({}) with same email ({}) and email verification status is unknown",
                    user_infos.identifier, user.uuid, user.email
                );
                err_silent!(
                    "Email verification status is unknown",
                    ErrorEvent {
                        event: EventType::UserFailedLogIn
                    }
                )
            }
            Some(false) => {
                error!(
                    "Login failure ({}), existing non SSO user ({}) with same email ({}) and email is not verified",
                    user_infos.identifier, user.uuid, user.email
                );
                err_silent!(
                    "Email is not verified by the SSO provider",
                    ErrorEvent {
                        event: EventType::UserFailedLogIn
                    }
                )
            }
            _ => Some((user, None)),
        },
    },
        Some((user, sso_user)) => Some((user, Some(sso_user))),
    };
@@ -266,7 +302,7 @@ async fn _sso_login(
        Some((user, _)) if !user.enabled => {
            err!(
                "This user has been disabled",
                format!("IP: {}. Username: {}.", ip.ip, user.name),
                format!("IP: {}. Username: {}.", ip.ip, user.display_name()),
                ErrorEvent {
                    event: EventType::UserFailedLogIn
                }
@@ -312,7 +348,7 @@ async fn _password_login(
    user_id: &mut Option<UserId>,
    conn: &DbConn,
    ip: &ClientIp,
    client_version: &Option<ClientVersion>,
    client_version: Option<&ClientVersion>,
) -> JsonResult {
    // Validate scope
    AuthMethod::Password.check_scope(data.scope.as_ref())?;
@@ -482,14 +518,18 @@ async fn authenticated_response(
        Value::Null
    };

    let account_keys = json!({
        "publicKeyEncryptionKeyPair": {
            "wrappedPrivateKey": user.private_key,
            "publicKey": user.public_key,
            "Object": "publicKeyEncryptionKeyPair"
        },
        "Object": "privateKeys"
    });
    let account_keys = if user.private_key.is_some() {
        json!({
            "publicKeyEncryptionKeyPair": {
                "wrappedPrivateKey": user.private_key,
                "publicKey": user.public_key,
                "Object": "publicKeyEncryptionKeyPair"
            },
            "Object": "privateKeys"
        })
    } else {
        Value::Null
    };

    let mut result = json!({
        "access_token": auth_tokens.access_token(),
@@ -521,7 +561,7 @@ async fn authenticated_response(
        result["TwoFactorToken"] = Value::String(token);
    }

    info!("User {} logged in successfully. IP: {}", &user.name, ip.ip);
    info!("User {} logged in successfully. IP: {}", user.display_name(), ip.ip);
    Ok(Json(result))
}

@@ -610,6 +650,38 @@ async fn _user_api_key_login(

    info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);

    let has_master_password = !user.password_hash.is_empty();
    let master_password_unlock = if has_master_password {
        json!({
            "Kdf": {
                "KdfType": user.client_kdf_type,
                "Iterations": user.client_kdf_iter,
                "Memory": user.client_kdf_memory,
                "Parallelism": user.client_kdf_parallelism
            },
            // This field is named inconsistently and will be removed and replaced by the "wrapped" variant in the apps.
            // https://github.com/bitwarden/android/blob/release/2025.12-rc41/network/src/main/kotlin/com/bitwarden/network/model/MasterPasswordUnlockDataJson.kt#L22-L26
            "MasterKeyEncryptedUserKey": user.akey,
            "MasterKeyWrappedUserKey": user.akey,
            "Salt": user.email
        })
    } else {
        Value::Null
    };

    let account_keys = if user.private_key.is_some() {
        json!({
            "publicKeyEncryptionKeyPair": {
                "wrappedPrivateKey": user.private_key,
                "publicKey": user.public_key,
                "Object": "publicKeyEncryptionKeyPair"
            },
            "Object": "privateKeys"
        })
    } else {
        Value::Null
    };

    // Note: No refresh_token is returned. The CLI just repeats the
    // client_credentials login flow when the existing token expires.
    let result = json!({
@@ -624,7 +696,14 @@ async fn _user_api_key_login(
        "KdfMemory": user.client_kdf_memory,
        "KdfParallelism": user.client_kdf_parallelism,
        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
        "ForcePasswordReset": false,
        "scope": AuthMethod::UserApiKey.scope(),
        "AccountKeys": account_keys,
        "UserDecryptionOptions": {
            "HasMasterPassword": has_master_password,
            "MasterPasswordUnlock": master_password_unlock,
            "Object": "userDecryptionOptions"
        },
    });

    Ok(Json(result))
@@ -683,7 +762,7 @@ async fn twofactor_auth(
    data: &ConnectData,
    device: &mut Device,
    ip: &ClientIp,
    client_version: &Option<ClientVersion>,
    client_version: Option<&ClientVersion>,
    conn: &DbConn,
) -> ApiResult<Option<String>> {
    let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
@@ -696,8 +775,27 @@ async fn twofactor_auth(

    TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, device.atype, ip, conn).await?;

    let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
    let twofactor_ids: Vec<_> = twofactors
        .iter()
        .filter_map(|tf| {
            let provider_type = TwoFactorType::from_i32(tf.atype)?;
            (tf.enabled && is_twofactor_provider_usable(provider_type, Some(&tf.data))).then_some(tf.atype)
        })
        .collect();
    if twofactor_ids.is_empty() {
        err!("No enabled and usable two factor providers are available for this account")
    }

    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
    // Ignore Remember and RecoveryCode Types during this check, these are special
    if ![TwoFactorType::Remember as i32, TwoFactorType::RecoveryCode as i32].contains(&selected_id)
        && !twofactor_ids.contains(&selected_id)
    {
        err_json!(
            _json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
            "Invalid two factor provider"
        )
    }

    let twofactor_code = match data.two_factor_token {
        Some(ref code) => code,
@@ -714,7 +812,6 @@ async fn twofactor_auth(
    use crate::crypto::ct_eq;

    let selected_data = _selected_data(selected_twofactor);
    let mut remember = data.two_factor_remember.unwrap_or(0);

    match TwoFactorType::from_i32(selected_id) {
        Some(TwoFactorType::Authenticator) => {
@@ -746,13 +843,23 @@ async fn twofactor_auth(
        }
        Some(TwoFactorType::Remember) => {
            match device.twofactor_remember {
                Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
                    remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
                }
                // When a 2FA Remember token is used, check and validate this JWT token; if it is valid, just continue
                // If it is invalid we need to trigger the 2FA Login prompt
                Some(ref token)
                    if !CONFIG.disable_2fa_remember()
                        && (ct_eq(token, twofactor_code)
                            && auth::decode_2fa_remember(twofactor_code)
                                .is_ok_and(|t| t.sub == device.uuid && t.user_uuid == user.uuid)) => {}
                _ => {
                    // Always delete the current twofactor remember token here if it exists
                    if device.twofactor_remember.is_some() {
                        device.delete_twofactor_remember();
                        // We need to save here, since we send an err_json!() which prevents saving `device` at a later stage
                        device.save(true, conn).await?;
                    }
                    err_json!(
                        _json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
                        "2FA Remember token not provided"
                        "2FA Remember token not provided or expired"
                    )
                }
            }
@@ -783,10 +890,10 @@ async fn twofactor_auth(

    TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;

    let remember = data.two_factor_remember.unwrap_or(0);
    let two_factor = if !CONFIG.disable_2fa_remember() && remember == 1 {
        Some(device.refresh_twofactor_remember())
    } else {
        device.delete_twofactor_remember();
        None
    };
    Ok(two_factor)
@@ -800,7 +907,7 @@ async fn _json_err_twofactor(
    providers: &[i32],
    user_id: &UserId,
    data: &ConnectData,
    client_version: &Option<ClientVersion>,
    client_version: Option<&ClientVersion>,
    conn: &DbConn,
) -> ApiResult<Value> {
    let mut result = json!({
@@ -819,7 +926,7 @@ async fn _json_err_twofactor(
        match TwoFactorType::from_i32(*provider) {
            Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }

            Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
            Some(TwoFactorType::Webauthn) if CONFIG.is_webauthn_2fa_supported() => {
                let request = webauthn::generate_webauthn_login(user_id, conn).await?;
                result["TwoFactorProviders2"][provider.to_string()] = request.0;
            }
@@ -904,6 +1011,11 @@ async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
    _prelogin(data, conn).await
}

#[post("/accounts/prelogin/password", data = "<data>")]
async fn prelogin_password(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
    _prelogin(data, conn).await
}

#[post("/accounts/register", data = "<data>")]
async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
    _register(data, false, conn).await
@@ -919,6 +1031,7 @@ struct RegisterVerificationData {

#[derive(rocket::Responder)]
enum RegisterVerificationResponse {
    #[response(status = 204)]
    NoContent(()),
    Token(Json<String>),
}
@@ -946,12 +1059,11 @@ async fn register_verification_email(
    let user = User::find_by_mail(&data.email, &conn).await;
    if user.filter(|u| u.private_key.is_some()).is_some() {
        // There is still a timing side channel here in that the code
        // paths that send mail take noticeably longer than ones that
        // don't. Add a randomized sleep to mitigate this somewhat.
        use rand::{rngs::SmallRng, Rng, SeedableRng};
        let mut rng = SmallRng::from_os_rng();
        let delta: i32 = 100;
        let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
        // paths that send mail take noticeably longer than ones that don't.
        // Add a randomized sleep to mitigate this somewhat.
        use rand::{rngs::SmallRng, RngExt};
        let mut rng: SmallRng = rand::make_rng();
        let sleep_ms = rng.random_range(900..=1100) as u64;
        tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
    } else {
        mail::send_register_verify_email(&data.email, &token).await?;
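The rewrite keeps the mean delay at one second while sampling uniformly from [900, 1100] ms, so the no-mail path takes roughly as long as the mail-sending path. A runnable sketch of the same jitter written against the rand 0.9 API (`rand::rng()` / `Rng::random_range`), since the hunk above targets a newer rand release:

```rust
use rand::Rng;

async fn jittered_sleep() {
    // uniform in [900, 1100] ms; an attacker timing the endpoint learns much less
    let sleep_ms: u64 = rand::rng().random_range(900..=1100);
    tokio::time::sleep(std::time::Duration::from_millis(sleep_ms)).await;
}

#[tokio::main]
async fn main() {
    jittered_sleep().await;
}
```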
@@ -1030,7 +1142,7 @@ struct ConnectData {
    #[field(name = uncased("code_verifier"))]
    code_verifier: Option<OIDCCodeVerifier>,
}
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
fn _check_is_some<T>(value: Option<&T>, msg: &str) -> EmptyResult {
    if value.is_none() {
        err!(msg)
    }
@@ -1049,13 +1161,16 @@ fn prevalidate() -> JsonResult {
    }
}

const SSO_BINDING_COOKIE: &str = "VW_SSO_BINDING";

#[get("/connect/oidc-signin?<code>&<state>", rank = 1)]
async fn oidcsignin(code: OIDCCode, state: String, mut conn: DbConn) -> ApiResult<Redirect> {
async fn oidcsignin(code: OIDCCode, state: String, cookies: &CookieJar<'_>, mut conn: DbConn) -> ApiResult<Redirect> {
    _oidcsignin_redirect(
        state,
        OIDCCodeWrapper::Ok {
            code,
        },
        cookies,
        &mut conn,
    )
    .await
@@ -1068,6 +1183,7 @@ async fn oidcsignin_error(
    state: String,
    error: String,
    error_description: Option<String>,
    cookies: &CookieJar<'_>,
    mut conn: DbConn,
) -> ApiResult<Redirect> {
    _oidcsignin_redirect(
@@ -1076,6 +1192,7 @@ async fn oidcsignin_error(
            error,
            error_description,
        },
        cookies,
        &mut conn,
    )
    .await
@@ -1087,6 +1204,7 @@ async fn oidcsignin_error(
async fn _oidcsignin_redirect(
    base64_state: String,
    code_response: OIDCCodeWrapper,
    cookies: &CookieJar<'_>,
    conn: &mut DbConn,
) -> ApiResult<Redirect> {
    let state = sso::decode_state(&base64_state)?;
@@ -1095,6 +1213,17 @@ async fn _oidcsignin_redirect(
        None => err!(format!("Cannot retrieve sso_auth for {state}")),
        Some(sso_auth) => sso_auth,
    };

    // Browser-binding check
    // The cookie was set on /connect/authorize and must come from the same browser that initiated the flow.
    let cookie_value = cookies.get(SSO_BINDING_COOKIE).map(|c| c.value().to_string());
    let provided_hash = cookie_value.as_deref().map(|v| crypto::sha256_hex(v.as_bytes()));
    match (sso_auth.binding_hash.as_deref(), provided_hash.as_deref()) {
        (Some(expected), Some(actual)) if crypto::ct_eq(expected, actual) => {}
        _ => err!(format!("SSO session binding mismatch for {state}")),
    }
    cookies.remove(Cookie::build(SSO_BINDING_COOKIE).path("/identity/connect/").build());

    sso_auth.code_response = Some(code_response);
    sso_auth.updated_at = Utc::now().naive_utc();
    sso_auth.save(conn).await?;
@@ -1141,7 +1270,7 @@ struct AuthorizeData {

// The `redirect_uri` will change depending on the client (web, android, ios ..)
#[get("/connect/authorize?<data..>")]
async fn authorize(data: AuthorizeData, conn: DbConn) -> ApiResult<Redirect> {
async fn authorize(data: AuthorizeData, cookies: &CookieJar<'_>, secure: Secure, conn: DbConn) -> ApiResult<Redirect> {
    let AuthorizeData {
        client_id,
        redirect_uri,
@@ -1155,7 +1284,23 @@ async fn authorize(data: AuthorizeData, conn: DbConn) -> ApiResult<Redirect> {
        err!("Unsupported code challenge method");
    }

    let auth_url = sso::authorize_url(state, code_challenge, &client_id, &redirect_uri, conn).await?;
    // Generate browser-binding token. Stored hashed in DB; raw value handed to the browser as a cookie.
    // Validated on /connect/oidc-signin
    let binding_token = data_encoding::BASE64URL_NOPAD.encode(&crypto::get_random_bytes::<32>());
    let binding_hash = crypto::sha256_hex(binding_token.as_bytes());

    let auth_url =
        sso::authorize_url(state, code_challenge, &client_id, &redirect_uri, Some(binding_hash), conn).await?;

    cookies.add(
        Cookie::build((SSO_BINDING_COOKIE, binding_token))
            .path("/identity/connect/")
            .max_age(time::Duration::seconds(sso::SSO_AUTH_EXPIRATION.num_seconds()))
            .same_site(SameSite::Lax) // Lax is needed because the IdP runs on a different FQDN
            .http_only(true)
            .secure(secure.https)
            .build(),
    );

    Ok(Redirect::temporary(String::from(auth_url)))
}

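The binding scheme stores only a SHA-256 hash server-side, so a leaked database row cannot be replayed as a cookie, and the check is constant-time. A self-contained sketch of generate/verify using `ring`, `data_encoding`, and `subtle` directly (vaultwarden routes these through its own `crypto` helpers):

```rust
use data_encoding::{BASE64URL_NOPAD, HEXLOWER};
use ring::digest;
use ring::rand::{SecureRandom, SystemRandom};
use subtle::ConstantTimeEq;

fn sha256_hex(data: &[u8]) -> String {
    HEXLOWER.encode(digest::digest(&digest::SHA256, data).as_ref())
}

/// Returns (cookie_value, stored_hash): the raw token goes to the browser,
/// only its hash is persisted alongside the pending SSO authentication.
fn new_binding_pair() -> (String, String) {
    let mut bytes = [0u8; 32];
    SystemRandom::new().fill(&mut bytes).expect("CSPRNG failure");
    let token = BASE64URL_NOPAD.encode(&bytes);
    let hash = sha256_hex(token.as_bytes());
    (token, hash)
}

fn binding_matches(stored_hash: &str, cookie_value: &str) -> bool {
    let actual = sha256_hex(cookie_value.as_bytes());
    // constant-time compare avoids leaking how many leading bytes matched
    stored_hash.as_bytes().ct_eq(actual.as_bytes()).into()
}

fn main() {
    let (token, hash) = new_binding_pair();
    assert!(binding_matches(&hash, &token));
    assert!(!binding_matches(&hash, "tampered"));
}
```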
@@ -47,6 +47,7 @@ pub type EmptyResult = ApiResult<()>;
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct PasswordOrOtpData {
    #[serde(alias = "MasterPasswordHash")]
    master_password_hash: Option<String>,
    otp: Option<String>,
}

@@ -338,7 +338,7 @@ impl WebSocketUsers {
    }

    // NOTE: The last modified date needs to be updated before calling these methods
    pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &DbConn) {
    pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: Option<&PushId>, conn: &DbConn) {
        // Skip any processing if both WebSockets and Push are not active
        if *NOTIFICATIONS_DISABLED {
            return;
@@ -358,15 +358,16 @@ impl WebSocketUsers {
        }
    }

    pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>, conn: &DbConn) {
    pub async fn send_logout(&self, user: &User, acting_device: Option<&Device>, conn: &DbConn) {
        // Skip any processing if both WebSockets and Push are not active
        if *NOTIFICATIONS_DISABLED {
            return;
        }
        let acting_device_id = acting_device.map(|d| d.uuid.clone());
        let data = create_update(
            vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
            UpdateType::LogOut,
            acting_device_id.clone(),
            acting_device_id,
        );

        if CONFIG.enable_websocket() {
@@ -374,7 +375,7 @@ impl WebSocketUsers {
        }

        if CONFIG.push_enabled() {
            push_logout(user, acting_device_id.clone(), conn).await;
            push_logout(user, acting_device, conn).await;
        }
    }


+6
-8
@@ -13,7 +13,7 @@ use tokio::sync::RwLock;
use crate::{
    api::{ApiResult, EmptyResult, UpdateType},
    db::{
        models::{AuthRequestId, Cipher, Device, DeviceId, Folder, PushId, Send, User, UserId},
        models::{AuthRequestId, Cipher, Device, Folder, PushId, Send, User, UserId},
        DbConn,
    },
    http_client::make_http_request,
@@ -135,7 +135,7 @@ pub async fn register_push_device(device: &mut Device, conn: &DbConn) -> EmptyRe
    Ok(())
}

pub async fn unregister_push_device(push_id: &Option<PushId>) -> EmptyResult {
pub async fn unregister_push_device(push_id: Option<&PushId>) -> EmptyResult {
    if !CONFIG.push_enabled() || push_id.is_none() {
        return Ok(());
    }
@@ -188,15 +188,13 @@ pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device
    }
}

pub async fn push_logout(user: &User, acting_device_id: Option<DeviceId>, conn: &DbConn) {
    let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null);

pub async fn push_logout(user: &User, acting_device: Option<&Device>, conn: &DbConn) {
    if Device::check_user_has_push_device(&user.uuid, conn).await {
        tokio::task::spawn(send_to_push_relay(json!({
            "userId": user.uuid,
            "organizationId": (),
            "deviceId": acting_device_id,
            "identifier": acting_device_id,
            "deviceId": acting_device.and_then(|d| d.push_uuid.as_ref()),
            "identifier": acting_device.map(|d| &d.uuid),
            "type": UpdateType::LogOut as i32,
            "payload": {
                "userId": user.uuid,
@@ -208,7 +206,7 @@ pub async fn push_logout(user: &User, acting_device_id: Option<DeviceId>, conn:
    }
}

pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &DbConn) {
pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: Option<&PushId>, conn: &DbConn) {
    if Device::check_user_has_push_device(&user.uuid, conn).await {
        tokio::task::spawn(send_to_push_relay(json!({
            "userId": user.uuid,

+5
-3
@@ -60,11 +60,13 @@ fn vaultwarden_css() -> Cached<Css<String>> {
        "mail_2fa_enabled": CONFIG._enable_email_2fa(),
        "mail_enabled": CONFIG.mail_enabled(),
        "sends_allowed": CONFIG.sends_allowed(),
        "remember_2fa_disabled": CONFIG.disable_2fa_remember(),
        "password_hints_allowed": CONFIG.password_hints_allowed(),
        "signup_disabled": CONFIG.is_signup_disabled(),
        "sso_enabled": CONFIG.sso_enabled(),
        "sso_only": CONFIG.sso_enabled() && CONFIG.sso_only(),
        "yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
        "webauthn_2fa_supported": CONFIG.is_webauthn_2fa_supported(),
        "yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
    });

    let scss = match CONFIG.render_template("scss/vaultwarden.scss", &css_options) {
@@ -238,8 +240,8 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
        "jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
        "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.7.1.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.1.slim.js")))
        "jquery-4.0.0.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-4.0.0.slim.js")))
        }
        _ => err!(format!("Static file not found: {filename}")),
    }

+68
-19
@@ -46,6 +46,7 @@ static JWT_FILE_DOWNLOAD_ISSUER: LazyLock<String> =
    LazyLock::new(|| format!("{}|file_download", CONFIG.domain_origin()));
static JWT_REGISTER_VERIFY_ISSUER: LazyLock<String> =
    LazyLock::new(|| format!("{}|register_verify", CONFIG.domain_origin()));
static JWT_2FA_REMEMBER_ISSUER: LazyLock<String> = LazyLock::new(|| format!("{}|2faremember", CONFIG.domain_origin()));

static PRIVATE_RSA_KEY: OnceLock<EncodingKey> = OnceLock::new();
static PUBLIC_RSA_KEY: OnceLock<DecodingKey> = OnceLock::new();
@@ -160,6 +161,10 @@ pub fn decode_register_verify(token: &str) -> Result<RegisterVerifyClaims, Error
    decode_jwt(token, JWT_REGISTER_VERIFY_ISSUER.to_string())
}

pub fn decode_2fa_remember(token: &str) -> Result<TwoFactorRememberClaims, Error> {
    decode_jwt(token, JWT_2FA_REMEMBER_ISSUER.to_string())
}

#[derive(Debug, Serialize, Deserialize)]
pub struct LoginJwtClaims {
    // Not before
@@ -440,6 +445,31 @@ pub fn generate_register_verify_claims(email: String, name: Option<String>, veri
    }
}

#[derive(Serialize, Deserialize)]
pub struct TwoFactorRememberClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
    pub exp: i64,
    // Issuer
    pub iss: String,
    // Subject
    pub sub: DeviceId,
    // UserId
    pub user_uuid: UserId,
}

pub fn generate_2fa_remember_claims(device_uuid: DeviceId, user_uuid: UserId) -> TwoFactorRememberClaims {
    let time_now = Utc::now();
    TwoFactorRememberClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + TimeDelta::try_days(30).unwrap()).timestamp(),
        iss: JWT_2FA_REMEMBER_ISSUER.to_string(),
        sub: device_uuid,
        user_uuid,
    }
}

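Scoping the remember token to its own issuer string means a login JWT can never double as a 2FA remember token. A sketch of verifying such claims with the `jsonwebtoken` crate, which vaultwarden's `decode_jwt` presumably wraps (the exact wiring is an assumption, and the id fields are simplified to `String`):

```rust
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct TwoFactorRememberClaims {
    nbf: i64,
    exp: i64,
    iss: String,
    sub: String,       // device id
    user_uuid: String, // user id
}

fn decode_2fa_remember(token: &str, key: &DecodingKey, issuer: &str) -> Option<TwoFactorRememberClaims> {
    let mut validation = Validation::new(Algorithm::RS256);
    validation.set_issuer(&[issuer]); // e.g. "https://vault.example.com|2faremember" (illustrative)
    validation.validate_nbf = true;   // `exp` is checked by default, `nbf` is opt-in
    decode::<TwoFactorRememberClaims>(token, key, &validation).ok().map(|data| data.claims)
}
```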
#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
    // Not before
@@ -674,10 +704,9 @@ pub struct OrgHeaders {

impl OrgHeaders {
    fn is_member(&self) -> bool {
        // NOTE: we don't care about MembershipStatus at the moment because this is only used
        // where an invited, accepted or confirmed user is expected if this ever changes or
        // if from_i32 is changed to return Some(Revoked) this check needs to be changed accordingly
        self.membership_type >= MembershipType::User
        // Only allow not revoked members, we can not use the Confirmed status here
        // as some endpoints can be triggered by invited users during joining
        self.membership_status != MembershipStatus::Revoked && self.membership_type >= MembershipType::User
    }
    fn is_confirmed_and_admin(&self) -> bool {
        self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Admin
@@ -690,6 +719,36 @@ impl OrgHeaders {
    }
}

// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_org_id(request: &Request<'_>) -> Option<OrganizationId> {
    if let Some(Ok(org_id)) = request.param::<OrganizationId>(1) {
        Some(org_id)
    } else if let Some(Ok(org_id)) = request.query_value::<OrganizationId>("organizationId") {
        Some(org_id)
    } else {
        None
    }
}

// Special Guard to ensure that there is an organization id present
// If there is no org id trigger the Outcome::Forward.
// This is useful for endpoints which work for both organization and personal vaults, like purge.
pub struct OrgIdGuard;

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgIdGuard {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match get_org_id(request) {
            Some(_) => Outcome::Success(OrgIdGuard),
            None => Outcome::Forward(rocket::http::Status::NotFound),
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgHeaders {
    type Error = &'static str;
@@ -697,18 +756,8 @@ impl<'r> FromRequest<'r> for OrgHeaders {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(Headers::from_request(request).await);

        // org_id is usually the second path param ("/organizations/<org_id>"),
        // but there are cases where it is a query value.
        // First check the path, if this is not a valid uuid, try the query values.
        let url_org_id: Option<OrganizationId> = {
            if let Some(Ok(org_id)) = request.param::<OrganizationId>(1) {
                Some(org_id)
            } else if let Some(Ok(org_id)) = request.query_value::<OrganizationId>("organizationId") {
                Some(org_id)
            } else {
                None
            }
        };
        // Extract the org_id from the request
        let url_org_id = get_org_id(request);

        match url_org_id {
            Some(org_id) if uuid::Uuid::parse_str(&org_id).is_ok() => {
@@ -826,7 +875,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
            _ => err_handler!("Error getting DB"),
        };

        if !Collection::can_access_collection(&headers.membership, &col_id, &conn).await {
        if !Collection::is_coll_manageable_by_user(&col_id, &headers.membership.user_uuid, &conn).await {
            err_handler!("The current user isn't a manager for this collection")
        }
    }
@@ -908,8 +957,8 @@ impl ManagerHeaders {
        if uuid::Uuid::parse_str(col_id.as_ref()).is_err() {
            err!("Collection Id is malformed!");
        }
        if !Collection::can_access_collection(&h.membership, col_id, conn).await {
            err!("You don't have access to all collections!");
        if !Collection::is_coll_manageable_by_user(col_id, &h.membership.user_uuid, conn).await {
            err!("Collection not found", "The current user isn't a manager for this collection")
        }
    }


+55
-35
@@ -14,7 +14,10 @@ use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor};

use crate::{
    error::Error,
    util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags},
    util::{
        get_active_web_release, get_env, get_env_bool, is_valid_email, parse_experimental_client_feature_flags,
        FeatureFlagFilter,
    },
};

static CONFIG_FILE: LazyLock<String> = LazyLock::new(|| {
@@ -920,7 +923,7 @@ make_config! {
    },
}

fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
fn validate_config(cfg: &ConfigItems, on_update: bool) -> Result<(), Error> {
    // Validate connection URL is valid and DB feature is enabled
    #[cfg(sqlite)]
    {
@@ -1026,33 +1029,17 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
        }
    }

    // Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
    // Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
    // Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
    // iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
    //
    // NOTE: Move deprecated flags to the utils::parse_experimental_client_feature_flags() DEPRECATED_FLAGS const!
    const KNOWN_FLAGS: &[&str] = &[
        // Autofill Team
        "inline-menu-positioning-improvements",
        "inline-menu-totp",
        "ssh-agent",
        // Key Management Team
        "ssh-key-vault-item",
        "pm-25373-windows-biometrics-v2",
        // Tools
        "export-attachments",
        // Mobile Team
        "anon-addy-self-host-alias",
        "simple-login-self-host-alias",
        "mutual-tls",
    ];
    let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
    let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
    let invalid_flags =
        parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags, FeatureFlagFilter::InvalidOnly);
    if !invalid_flags.is_empty() {
        err!(format!("Unrecognized experimental client feature flags: {invalid_flags:?}.\n\n\
        let feature_flags_error = format!("Unrecognized experimental client feature flags: {:?}.\n\
            Please ensure all feature flags are spelled correctly and that they are supported in this version.\n\
            Supported flags: {KNOWN_FLAGS:?}"));
            Supported flags: {:?}\n", invalid_flags, SUPPORTED_FEATURE_FLAGS);
        if on_update {
            err!(feature_flags_error);
        } else {
            println!("[WARNING] {feature_flags_error}");
        }
    }

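Splitting validation behavior on `on_update` means a bad flag in the environment only warns at startup (so the container still boots) but hard-fails when saved from the admin page. A minimal sketch of the valid/invalid partition itself, with a tiny stand-in list (the real parser also maps deprecated flag names, which is omitted here):

```rust
const SUPPORTED: &[&str] = &["ssh-agent", "ssh-key-vault-item"];

fn partition_flags(raw: &str) -> (Vec<String>, Vec<String>) {
    raw.split(',')
        .map(|flag| flag.trim().to_string())
        .filter(|flag| !flag.is_empty())
        .partition(|flag| SUPPORTED.contains(&flag.as_str()))
}

fn main() {
    let (valid, invalid) = partition_flags("ssh-agent, fido2-vault-credentials");
    assert_eq!(valid, ["ssh-agent"]);
    assert_eq!(invalid, ["fido2-vault-credentials"]); // would warn at boot, error on admin save
}
```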
const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
@@ -1089,7 +1076,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

    validate_internal_sso_issuer_url(&cfg.sso_authority)?;
    validate_internal_sso_redirect_url(&cfg.sso_callback_path)?;
    validate_sso_master_password_policy(&cfg.sso_master_password_policy)?;
    validate_sso_master_password_policy(cfg.sso_master_password_policy.as_ref())?;
}

if cfg._enable_yubico {
@@ -1284,7 +1271,7 @@ fn validate_internal_sso_redirect_url(sso_callback_path: &String) -> Result<open
}

fn validate_sso_master_password_policy(
    sso_master_password_policy: &Option<String>,
    sso_master_password_policy: Option<&String>,
) -> Result<Option<serde_json::Value>, Error> {
    let policy = sso_master_password_policy.as_ref().map(|mpp| serde_json::from_str::<serde_json::Value>(mpp));

@@ -1325,12 +1312,16 @@ fn generate_smtp_img_src(embed_images: bool, domain: &str) -> String {
    if embed_images {
        "cid:".to_string()
    } else {
        format!("{domain}/vw_static/")
        // normalize base_url
        let base_url = domain.trim_end_matches('/');
        format!("{base_url}/vw_static/")
    }
}

fn generate_sso_callback_path(domain: &str) -> String {
    format!("{domain}/identity/connect/oidc-signin")
    // normalize base_url
    let base_url = domain.trim_end_matches('/');
    format!("{base_url}/identity/connect/oidc-signin")
}

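Both generators now trim the trailing slash, so a `DOMAIN` of `https://vault.example.com/` no longer yields double-slash URLs. A one-function check of the normalization (the domain value is illustrative):

```rust
fn generate_sso_callback_path(domain: &str) -> String {
    let base_url = domain.trim_end_matches('/');
    format!("{base_url}/identity/connect/oidc-signin")
}

fn main() {
    assert_eq!(
        generate_sso_callback_path("https://vault.example.com/"),
        "https://vault.example.com/identity/connect/oidc-signin"
    );
}
```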
/// Generate the correct URL for the icon service.
|
||||
@@ -1467,6 +1458,35 @@ pub enum PathType {
|
||||
RsaKey,
|
||||
}
|
||||
|
||||
// Official available feature flags can be found here:
|
||||
// Server (v2026.2.1): https://github.com/bitwarden/server/blob/0e42725d0837bd1c0dabd864ff621a579959744b/src/Core/Constants.cs#L135
|
||||
// Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12
|
||||
// Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31
|
||||
// iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
|
||||
pub const SUPPORTED_FEATURE_FLAGS: &[&str] = &[
|
||||
// Architecture
|
||||
"desktop-ui-migration-milestone-1",
|
||||
"desktop-ui-migration-milestone-2",
|
||||
"desktop-ui-migration-milestone-3",
|
||||
"desktop-ui-migration-milestone-4",
|
||||
// Auth Team
|
||||
"pm-5594-safari-account-switching",
|
||||
// Autofill Team
|
||||
"ssh-agent",
|
||||
"ssh-agent-v2",
|
||||
// Key Management Team
|
||||
"ssh-key-vault-item",
|
||||
"pm-25373-windows-biometrics-v2",
|
||||
// Mobile Team
|
||||
"anon-addy-self-host-alias",
|
||||
"simple-login-self-host-alias",
|
||||
"mutual-tls",
|
||||
"cxp-import-mobile",
|
||||
"cxp-export-mobile",
|
||||
// Platform Team
|
||||
"pm-30529-webauthn-related-origins",
|
||||
];
|
||||
|
||||
impl Config {
pub async fn load() -> Result<Self, Error> {
// Loading from env and file

@@ -1480,7 +1500,7 @@ impl Config {
// Fill any missing with defaults
let config = builder.build();
if !SKIP_CONFIG_VALIDATION.load(Ordering::Relaxed) {
validate_config(&config)?;
validate_config(&config, false)?;
}

Ok(Config {

@@ -1516,7 +1536,7 @@ impl Config {
let env = &self.inner.read().unwrap()._env;
env.merge(&builder, false, &mut overrides).build()
};
validate_config(&config)?;
validate_config(&config, true)?;

// Save both the user and the combined config
{

@@ -1705,7 +1725,7 @@ impl Config {
}

pub fn sso_master_password_policy_value(&self) -> Option<serde_json::Value> {
validate_sso_master_password_policy(&self.sso_master_password_policy()).ok().flatten()
validate_sso_master_password_policy(self.sso_master_password_policy().as_ref()).ok().flatten()
}

pub fn sso_scopes_vec(&self) -> Vec<String> {

@@ -1845,7 +1865,7 @@ fn to_json<'reg, 'rc>(
// Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then.
// The default is based upon the version since this feature is added.
static WEB_VAULT_VERSION: LazyLock<semver::Version> = LazyLock::new(|| {
let vault_version = get_web_vault_version();
let vault_version = get_active_web_release();
// Use a single regex capture to extract version components
let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
re.captures(&vault_version)
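The capture groups pull the year/major/patch components out of whatever string the active web-vault release reports. A standalone check of the same pattern (regex crate; the version string is made up):

use regex::Regex;

fn main() {
    let re = Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
    // Extra prefixes/suffixes in the reported version are ignored by the capture.
    let caps = re.captures("v2025.12.1-custom").unwrap();
    assert_eq!((&caps[1], &caps[2], &caps[3]), ("2025", "12", "1"));
}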
+9 -2
@@ -55,13 +55,13 @@ pub fn encode_random_bytes<const N: usize>(e: &Encoding) -> String {
/// Generates a random string over a specified alphabet.
pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::Rng;
use rand::RngExt;
let mut rng = rand::rng();

(0..num_chars)
.map(|_| {
let i = rng.random_range(0..alphabet.len());
alphabet[i] as char
char::from(alphabet[i])
})
.collect()
}

@@ -113,3 +113,10 @@ pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
use subtle::ConstantTimeEq;
a.as_ref().ct_eq(b.as_ref()).into()
}

//
// SHA256
//
pub fn sha256_hex(data: &[u8]) -> String {
HEXLOWER.encode(digest::digest(&digest::SHA256, data).as_ref())
}
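The sampler keeps its shape but moves to the newer rand API (rand::rng() plus random_range) and swaps the "as" cast for the lossless char::from. The same pattern standalone; the RngExt import name is taken from this diff, so verify it against the rand version pinned in Cargo.toml (older rand releases expose these methods through rand::Rng instead):

use rand::RngExt; // per the diff; may be `rand::Rng` on older rand versions

fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
    let mut rng = rand::rng();
    (0..num_chars)
        .map(|_| {
            // Index is uniform over the alphabet, so every character is equally likely.
            let i = rng.random_range(0..alphabet.len());
            char::from(alphabet[i])
        })
        .collect()
}

fn main() {
    // e.g. a 12-character lowercase hex token
    println!("{}", get_random_string(b"0123456789abcdef", 12));
}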
+7 -11
@@ -387,7 +387,6 @@ pub mod models;
#[cfg(sqlite)]
pub fn backup_sqlite() -> Result<String, Error> {
use diesel::Connection;
use std::{fs::File, io::Write};

let db_url = CONFIG.database_url();
if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::Sqlite).unwrap_or(false) {

@@ -401,16 +400,13 @@ pub fn backup_sqlite() -> Result<String, Error> {
.to_string_lossy()
.into_owned();

match File::create(backup_file.clone()) {
Ok(mut f) => {
let serialized_db = conn.serialize_database_to_buffer();
f.write_all(serialized_db.as_slice()).expect("Error writing SQLite backup");
Ok(backup_file)
}
Err(e) => {
err_silent!(format!("Unable to save SQLite backup: {e:?}"))
}
}
diesel::sql_query("VACUUM INTO ?")
.bind::<diesel::sql_types::Text, _>(&backup_file)
.execute(&mut conn)
.map(|_| ())
.map_res("VACUUM INTO failed")?;

Ok(backup_file)
} else {
err_silent!("The database type is not SQLite. Backups only works for SQLite databases")
}
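The backup path switches from serializing the whole database into a memory buffer to SQLite's built-in VACUUM INTO, which writes a compacted, transactionally consistent copy straight to the target file. A minimal standalone sketch of the same statement through the rusqlite crate for illustration (the project itself issues it through diesel's sql_query, as shown above):

use rusqlite::Connection;

fn backup(db_path: &str, backup_path: &str) -> rusqlite::Result<()> {
    let conn = Connection::open(db_path)?;
    // VACUUM INTO accepts an expression for the destination, so the
    // target path can be passed as a bound parameter.
    conn.execute("VACUUM INTO ?1", [backup_path])?;
    Ok(())
}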
@@ -0,0 +1,91 @@
use chrono::NaiveDateTime;
use diesel::prelude::*;

use super::{CipherId, User, UserId};
use crate::api::EmptyResult;
use crate::db::schema::archives;
use crate::db::DbConn;
use crate::error::MapResult;

#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = archives)]
#[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Archive {
pub user_uuid: UserId,
pub cipher_uuid: CipherId,
pub archived_at: NaiveDateTime,
}

impl Archive {
// Returns the date the specified cipher was archived
pub async fn get_archived_at(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
db_run! { conn: {
archives::table
.filter(archives::cipher_uuid.eq(cipher_uuid))
.filter(archives::user_uuid.eq(user_uuid))
.select(archives::archived_at)
.first::<NaiveDateTime>(conn).ok()
}}
}

// Saves (inserts or updates) an archive record with the provided timestamp
pub async fn save(
user_uuid: &UserId,
cipher_uuid: &CipherId,
archived_at: NaiveDateTime,
conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn:
sqlite, mysql {
diesel::replace_into(archives::table)
.values((
archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(archived_at),
))
.execute(conn)
.map_res("Error saving archive")
}
postgresql {
diesel::insert_into(archives::table)
.values((
archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(archived_at),
))
.on_conflict((archives::user_uuid, archives::cipher_uuid))
.do_update()
.set(archives::archived_at.eq(archived_at))
.execute(conn)
.map_res("Error saving archive")
}
}
}

// Deletes an archive record for a specific cipher
pub async fn delete_by_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn: {
diesel::delete(
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.filter(archives::cipher_uuid.eq(cipher_uuid))
)
.execute(conn)
.map_res("Error deleting archive")
}}
}

/// Return a vec with (cipher_uuid, archived_at)
/// This is used during a full sync so we only need one query for all archive matches
pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> {
db_run! { conn: {
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.select((archives::cipher_uuid, archives::archived_at))
.load::<(CipherId, NaiveDateTime)>(conn)
.unwrap_or_default()
}}
}
}
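Taken together with the Cipher helpers added further down in this diff, the intended call pattern looks roughly like this (a sketch only: the wrapper functions are hypothetical, and Cipher, UserId, DbConn and EmptyResult are the crate-internal types used throughout):

use chrono::Utc;

// Hypothetical wrappers around the helpers introduced in this diff.
async fn archive_cipher(cipher: &Cipher, user: &UserId, conn: &DbConn) -> EmptyResult {
    // Upserts an archives row and bumps the user's revision for sync.
    cipher.set_archived_at(Utc::now().naive_utc(), user, conn).await
}

async fn unarchive_cipher(cipher: &Cipher, user: &UserId, conn: &DbConn) -> EmptyResult {
    // Deletes the archives row; the cipher itself is untouched.
    cipher.unarchive(user, conn).await
}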
@@ -50,7 +50,7 @@ impl Attachment {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
} else {
Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
Ok(operator.presign_read(&self.get_file_path(), Duration::from_mins(5)).await?.uri().to_string())
}
}

@@ -177,7 +177,9 @@ impl AuthRequest {
}

pub async fn purge_expired_auth_requests(conn: &DbConn) {
let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); //after 5 minutes, clients reject the request
// delete auth requests older than 15 minutes which is functionally equivalent to upstream:
// https://github.com/bitwarden/server/blob/f8ee2270409f7a13125cd414c450740af605a175/src/Sql/dbo/Auth/Stored%20Procedures/AuthRequest_DeleteIfExpired.sql
let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(15).unwrap();
for auth_request in Self::find_created_before(&expiry_time, conn).await {
auth_request.delete(conn).await.ok();
}
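The purge window is computed the same way as before, just with the longer 15-minute delta. Standalone (chrono crate; the printed value is only for illustration):

use chrono::{TimeDelta, Utc};

fn main() {
    // Anything created before this instant is eligible for deletion.
    let expiry_time = Utc::now().naive_utc() - TimeDelta::try_minutes(15).unwrap();
    println!("purging auth requests created before {expiry_time}");
}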
+50 -24
@@ -10,8 +10,8 @@ use diesel::prelude::*;
use serde_json::Value;

use super::{
Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
MembershipType, OrganizationId, User, UserId,
Archive, Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership,
MembershipStatus, MembershipType, OrganizationId, User, UserId,
};
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
use macros::UuidFromParam;

@@ -380,6 +380,11 @@ impl Cipher {
} else {
self.is_favorite(user_uuid, conn).await
});
json_object["archivedDate"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_archives.get(&self.uuid).map_or(Value::Null, |d| Value::String(format_date(d)))
} else {
self.get_archived_at(user_uuid, conn).await.map_or(Value::Null, |d| Value::String(format_date(&d)))
});
// These values are true by default, but can be false if the
// cipher belongs to a collection or group where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user.

@@ -398,7 +403,7 @@ impl Cipher {
3 => "card",
4 => "identity",
5 => "sshKey",
_ => panic!("Wrong type"),
_ => err!(format!("Cipher {} has an invalid type {}", self.uuid, self.atype)),
};

json_object[key] = type_data_json;

@@ -559,7 +564,7 @@ impl Cipher {
if let Some(cached_member) = cipher_sync_data.members.get(org_uuid) {
return cached_member.has_full_access();
}
} else if let Some(member) = Membership::find_by_user_and_org(user_uuid, org_uuid, conn).await {
} else if let Some(member) = Membership::find_confirmed_by_user_and_org(user_uuid, org_uuid, conn).await {
return member.has_full_access();
}
}

@@ -668,10 +673,12 @@ impl Cipher {
ciphers::table
.filter(ciphers::uuid.eq(&self.uuid))
.inner_join(ciphers_collections::table.on(
ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
ciphers::uuid.eq(ciphers_collections::cipher_uuid)
))
.inner_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_uuid))))
.and(users_collections::user_uuid.eq(user_uuid))
))
.select((users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
.load::<(bool, bool, bool)>(conn)
.expect("Error getting user access restrictions")

@@ -697,6 +704,9 @@ impl Cipher {
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(collections_groups::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.filter(users_organizations::user_uuid.eq(user_uuid))
.select((collections_groups::read_only, collections_groups::hide_passwords, collections_groups::manage))
.load::<(bool, bool, bool)>(conn)

@@ -737,6 +747,18 @@ impl Cipher {
}
}

pub async fn get_archived_at(&self, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
Archive::get_archived_at(&self.uuid, user_uuid, conn).await
}

pub async fn set_archived_at(&self, archived_at: NaiveDateTime, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
Archive::save(user_uuid, &self.uuid, archived_at, conn).await
}

pub async fn unarchive(&self, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
Archive::delete_by_cipher(user_uuid, &self.uuid, conn).await
}

pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> {
db_run! { conn: {
folders_ciphers::table

@@ -795,28 +817,28 @@ impl Cipher {
let mut query = ciphers::table
.left_join(ciphers_collections::table.on(
ciphers::uuid.eq(ciphers_collections::cipher_uuid)
))
))
.left_join(users_organizations::table.on(
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
.and(users_organizations::user_uuid.eq(user_uuid))
.and(users_organizations::status.eq(MembershipStatus::Confirmed as i32))
))
))
.left_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
// Ensure that users_collections::user_uuid is NULL for unconfirmed users.
.and(users_organizations::user_uuid.eq(users_collections::user_uuid))
))
))
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
// Ensure that group and membership belong to the same org
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
collections_groups::groups_uuid.eq(groups::uuid)
)
))
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))
))
.filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
.or_filter(users_organizations::access_all.eq(true)) // access_all in org
.or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection

@@ -986,7 +1008,9 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))

@@ -1047,7 +1071,9 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))

@@ -1115,8 +1141,8 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
+19 -14
@@ -191,7 +191,7 @@ impl Collection {
self.update_users_revision(conn).await;
CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?;
CollectionUser::delete_all_by_collection(&self.uuid, conn).await?;
CollectionGroup::delete_all_by_collection(&self.uuid, conn).await?;
CollectionGroup::delete_all_by_collection(&self.uuid, &self.org_uuid, conn).await?;

db_run! { conn: {
diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))

@@ -239,8 +239,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(

@@ -355,8 +355,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(

@@ -422,8 +422,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)

@@ -484,8 +484,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(

@@ -513,7 +513,8 @@ impl Collection {
}}
}

pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool {
pub async fn is_coll_manageable_by_user(uuid: &CollectionId, user_uuid: &UserId, conn: &DbConn) -> bool {
let uuid = uuid.to_string();
let user_uuid = user_uuid.to_string();
db_run! { conn: {
collections::table

@@ -530,17 +531,17 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
collections_groups::collections_uuid.eq(collections::uuid)
)
))
.filter(collections::uuid.eq(&self.uuid))
.filter(collections::uuid.eq(&uuid))
.filter(
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::manage.eq(true)).or(// Directly accessed collection
users_collections::collection_uuid.eq(&uuid).and(users_collections::manage.eq(true)).or(// Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
)).or(

@@ -558,6 +559,10 @@ impl Collection {
.unwrap_or(0) != 0
}}
}

pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool {
Self::is_coll_manageable_by_user(&self.uuid, user_uuid, conn).await
}
}

/// Database methods
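The instance method now delegates to a by-id variant, so a manage-permission check no longer needs the Collection row loaded first. Roughly (a sketch; the wrapper is hypothetical and the crate-internal types are assumed to be in scope):

// Hypothetical call site: check manageability straight from a CollectionId,
// skipping the extra SELECT that loading the Collection would require.
async fn can_manage(collection_id: &CollectionId, user_id: &UserId, conn: &DbConn) -> bool {
    Collection::is_coll_manageable_by_user(collection_id, user_id, conn).await
}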
+28 -6
@@ -1,6 +1,6 @@
use chrono::{NaiveDateTime, Utc};

use data_encoding::{BASE64, BASE64URL};
use data_encoding::BASE64URL;
use derive_more::{Display, From};
use serde_json::Value;

@@ -25,7 +25,7 @@ pub struct Device {
pub user_uuid: UserId,

pub name: String,
pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
pub atype: i32, // https://github.com/bitwarden/server/blob/8d547dcc280babab70dd4a3c94ced6a34b12dfbf/src/Core/Enums/DeviceType.cs
pub push_uuid: Option<PushId>,
pub push_token: Option<String>,

@@ -49,11 +49,16 @@ impl Device {

push_uuid: Some(PushId(get_uuid())),
push_token: None,
refresh_token: crypto::encode_random_bytes::<64>(&BASE64URL),
refresh_token: Device::generate_refresh_token(),
twofactor_remember: None,
}
}

#[inline(always)]
pub fn generate_refresh_token() -> String {
crypto::encode_random_bytes::<64>(&BASE64URL)
}

pub fn to_json(&self) -> Value {
json!({
"id": self.uuid,

@@ -67,10 +72,13 @@ impl Device {
}

pub fn refresh_twofactor_remember(&mut self) -> String {
let twofactor_remember = crypto::encode_random_bytes::<180>(&BASE64);
self.twofactor_remember = Some(twofactor_remember.clone());
use crate::auth::{encode_jwt, generate_2fa_remember_claims};

twofactor_remember
let two_factor_remember_claim = generate_2fa_remember_claims(self.uuid.clone(), self.user_uuid.clone());
let two_factor_remember_string = encode_jwt(&two_factor_remember_claim);
self.twofactor_remember = Some(two_factor_remember_string.clone());

two_factor_remember_string
}

pub fn delete_twofactor_remember(&mut self) {

@@ -257,6 +265,17 @@ impl Device {
.unwrap_or(0) != 0
}}
}

pub async fn rotate_refresh_tokens_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
// Generate a new token per device.
// We cannot do a single UPDATE with one value because each device needs a unique token.
let devices = Self::find_by_user(user_uuid, conn).await;
for mut device in devices {
device.refresh_token = Device::generate_refresh_token();
device.save(false, conn).await?;
}
Ok(())
}
}

#[derive(Display)]

@@ -313,6 +332,8 @@ pub enum DeviceType {
MacOsCLI = 24,
#[display("Linux CLI")]
LinuxCLI = 25,
#[display("DuckDuckGo")]
DuckDuckGoBrowser = 26,
}

impl DeviceType {

@@ -344,6 +365,7 @@ impl DeviceType {
23 => DeviceType::WindowsCLI,
24 => DeviceType::MacOsCLI,
25 => DeviceType::LinuxCLI,
26 => DeviceType::DuckDuckGoBrowser,
_ => DeviceType::UnknownBrowser,
}
}
@@ -85,7 +85,8 @@ impl EmergencyAccess {
pub async fn to_json_grantee_details(&self, conn: &DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid {
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else if let Some(email) = self.email.as_deref() {
} else {
let email = self.email.as_deref()?;
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {

@@ -94,8 +95,6 @@ impl EmergencyAccess {
return None;
}
}
} else {
return None;
};

Some(json!({
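The refactor replaces the trailing "else { return None; }" arm with an early "?" on the optional email, which flattens the nesting without changing behavior. The same pattern in isolation (a standalone sketch; names are illustrative):

fn grantee_email(grantee: Option<&str>, invited_email: Option<String>) -> Option<String> {
    if let Some(g) = grantee {
        Some(g.to_string())
    } else {
        // `?` returns None from the whole function when no email is stored.
        let email = invited_email.as_deref()?;
        Some(email.to_string())
    }
}

fn main() {
    assert_eq!(grantee_email(None, None), None);
    assert_eq!(grantee_email(None, Some("a@b.c".into())), Some("a@b.c".to_string()));
}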
+62 -20
@@ -1,6 +1,6 @@
use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId};
use crate::api::EmptyResult;
use crate::db::schema::{collections_groups, groups, groups_users, users_organizations};
use crate::db::schema::{collections, collections_groups, groups, groups_users, users_organizations};
use crate::db::DbConn;
use crate::error::MapResult;
use chrono::{NaiveDateTime, Utc};

@@ -81,7 +81,7 @@ impl Group {
// If both read_only and hide_passwords are false, then manage should be true
// You can't have an entry with read_only and manage, or hide_passwords and manage
// Or an entry with everything to false
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, &self.organizations_uuid, conn)
.await
.iter()
.map(|entry| {

@@ -191,7 +191,7 @@ impl Group {

pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
for group in Self::find_by_organization(org_uuid, conn).await {
group.delete(conn).await?;
group.delete(org_uuid, conn).await?;
}
Ok(())
}

@@ -246,8 +246,8 @@ impl Group {
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
))
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
.inner_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(groups::access_all.eq(true))

@@ -276,9 +276,9 @@ impl Group {
}}
}

pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
CollectionGroup::delete_all_by_group(&self.uuid, conn).await?;
GroupUser::delete_all_by_group(&self.uuid, conn).await?;
pub async fn delete(&self, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
CollectionGroup::delete_all_by_group(&self.uuid, org_uuid, conn).await?;
GroupUser::delete_all_by_group(&self.uuid, org_uuid, conn).await?;

db_run! { conn: {
diesel::delete(groups::table.filter(groups::uuid.eq(&self.uuid)))

@@ -306,8 +306,8 @@ impl Group {
}

impl CollectionGroup {
pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await;
pub async fn save(&mut self, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, org_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}

@@ -365,10 +365,19 @@ impl CollectionGroup {
}
}

pub async fn find_by_group(group_uuid: &GroupId, conn: &DbConn) -> Vec<Self> {
pub async fn find_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
collections_groups::table
.inner_join(groups::table.on(
groups::uuid.eq(collections_groups::groups_uuid)
))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
.and(collections::org_uuid.eq(groups::organizations_uuid))
))
.filter(collections_groups::groups_uuid.eq(group_uuid))
.filter(collections::org_uuid.eq(org_uuid))
.select(collections_groups::all_columns)
.load::<Self>(conn)
.expect("Error loading collection groups")
}}

@@ -383,6 +392,13 @@ impl CollectionGroup {
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(collections_groups::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
.and(collections::org_uuid.eq(groups::organizations_uuid))
))
.filter(users_organizations::user_uuid.eq(user_uuid))
.select(collections_groups::all_columns)
.load::<Self>(conn)

@@ -394,14 +410,20 @@ impl CollectionGroup {
db_run! { conn: {
collections_groups::table
.filter(collections_groups::collections_uuid.eq(collection_uuid))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(collections_groups::groups_uuid)
.and(groups::organizations_uuid.eq(collections::org_uuid))
))
.select(collections_groups::all_columns)
.load::<Self>(conn)
.expect("Error loading collection groups")
}}
}

pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await;
pub async fn delete(&self, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, org_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}

@@ -415,8 +437,8 @@ impl CollectionGroup {
}}
}

pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, conn).await;
pub async fn delete_all_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, org_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}

@@ -429,10 +451,14 @@ impl CollectionGroup {
}}
}

pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult {
pub async fn delete_all_by_collection(
collection_uuid: &CollectionId,
org_uuid: &OrganizationId,
conn: &DbConn,
) -> EmptyResult {
let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await;
for collection_assigned_to_group in collection_assigned_to_groups {
let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await;
let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, org_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}

@@ -494,10 +520,19 @@ impl GroupUser {
}
}

pub async fn find_by_group(group_uuid: &GroupId, conn: &DbConn) -> Vec<Self> {
pub async fn find_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
groups_users::table
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
.and(users_organizations::org_uuid.eq(groups::organizations_uuid))
))
.filter(groups_users::groups_uuid.eq(group_uuid))
.filter(groups::organizations_uuid.eq(org_uuid))
.select(groups_users::all_columns)
.load::<Self>(conn)
.expect("Error loading group users")
}}

@@ -522,6 +557,13 @@ impl GroupUser {
.inner_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
.and(collections::org_uuid.eq(groups::organizations_uuid))
))
.filter(collections_groups::collections_uuid.eq(collection_uuid))
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
.count()

@@ -575,8 +617,8 @@ impl GroupUser {
}}
}

pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, conn).await;
pub async fn delete_all_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, org_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}
@@ -1,3 +1,4 @@
mod archive;
mod attachment;
mod auth_request;
mod cipher;

@@ -17,6 +18,7 @@ mod two_factor_duo_context;
mod two_factor_incomplete;
mod user;

pub use self::archive::Archive;
pub use self::attachment::{Attachment, AttachmentId};
pub use self::auth_request::{AuthRequest, AuthRequestId};
pub use self::cipher::{Cipher, CipherId, RepromptType};
@@ -269,7 +269,7 @@ impl OrgPolicy {
continue;
}

if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if let Some(user) = Membership::find_confirmed_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < MembershipType::Admin {
return true;
}

@@ -332,7 +332,7 @@ impl OrgPolicy {
for policy in
OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await
{
if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if let Some(user) = Membership::find_confirmed_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < MembershipType::Admin {
match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
Ok(opts) => {
@@ -514,7 +514,8 @@ impl Membership {
"familySponsorshipValidUntil": null,
"familySponsorshipToDelete": null,
"accessSecretsManager": false,
"limitCollectionCreation": self.atype < MembershipType::Manager, // If less then a manager return true, to limit collection creations
// limit collection creation to managers with access_all permission to prevent issues
"limitCollectionCreation": self.atype < MembershipType::Manager || !self.access_all,
"limitCollectionDeletion": true,
"limitItemDeletion": false,
"allowAdminAccessToAllCollectionItems": true,

@@ -1073,7 +1074,9 @@ impl Membership {
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(ciphers_collections::table.on(
ciphers_collections::collection_uuid.eq(collections_groups::collections_uuid).and(ciphers_collections::cipher_uuid.eq(&cipher_uuid))
@@ -46,6 +46,16 @@ pub enum SendType {
File = 1,
}

enum SendAuthType {
#[allow(dead_code)]
// Send requires email OTP verification
Email = 0, // Not yet supported by Vaultwarden
// Send requires a password
Password = 1,
// Send requires no auth
None = 2,
}

impl Send {
pub fn new(atype: i32, name: String, data: String, akey: String, deletion_date: NaiveDateTime) -> Self {
let now = Utc::now().naive_utc();

@@ -145,6 +155,7 @@ impl Send {
"maxAccessCount": self.max_access_count,
"accessCount": self.access_count,
"password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"authType": if self.password_hash.is_some() { SendAuthType::Password as i32 } else { SendAuthType::None as i32 },
"disabled": self.disabled,
"hideEmail": self.hide_email,
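The JSON now carries an explicit authType discriminant instead of making clients infer it from the password field. The mapping in isolation (discriminant values from the diff; everything else is an illustrative standalone sketch):

#[allow(dead_code)] // Email is declared for parity but never produced here
enum SendAuthType {
    Email = 0,    // not yet supported by Vaultwarden
    Password = 1,
    None = 2,
}

fn auth_type(password_hash: Option<&[u8]>) -> i32 {
    if password_hash.is_some() {
        SendAuthType::Password as i32
    } else {
        SendAuthType::None as i32
    }
}

fn main() {
    assert_eq!(auth_type(Some(b"hash".as_slice())), 1);
    assert_eq!(auth_type(None), 2);
}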
@@ -54,11 +54,18 @@ pub struct SsoAuth {
pub auth_response: Option<OIDCAuthenticatedUser>,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub binding_hash: Option<String>,
}

/// Local methods
impl SsoAuth {
pub fn new(state: OIDCState, client_challenge: OIDCCodeChallenge, nonce: String, redirect_uri: String) -> Self {
pub fn new(
state: OIDCState,
client_challenge: OIDCCodeChallenge,
nonce: String,
redirect_uri: String,
binding_hash: Option<String>,
) -> Self {
let now = Utc::now().naive_utc();

SsoAuth {

@@ -70,6 +77,7 @@ impl SsoAuth {
updated_at: now,
code_response: None,
auth_response: None,
binding_hash,
}
}
}
@@ -20,7 +20,6 @@ pub struct TwoFactor {
pub last_used: i64,
}

#[allow(dead_code)]
#[derive(num_derive::FromPrimitive)]
pub enum TwoFactorType {
Authenticator = 0,
+17 -4
@@ -185,13 +185,14 @@ impl User {
/// These routes are able to use the previous stamp id for the next 2 minutes.
/// After these 2 minutes this stamp will expire.
///
pub fn set_password(
pub async fn set_password(
&mut self,
password: &str,
new_key: Option<String>,
reset_security_stamp: bool,
allow_next_route: Option<Vec<String>>,
) {
conn: &DbConn,
) -> EmptyResult {
self.password_hash = crypto::hash_password(password.as_bytes(), &self.salt, self.password_iterations as u32);

if let Some(route) = allow_next_route {

@@ -203,12 +204,15 @@ impl User {
}

if reset_security_stamp {
self.reset_security_stamp()
self.reset_security_stamp(conn).await?;
}
Ok(())
}

pub fn reset_security_stamp(&mut self) {
pub async fn reset_security_stamp(&mut self, conn: &DbConn) -> EmptyResult {
self.security_stamp = get_uuid();
Device::rotate_refresh_tokens_by_user(&self.uuid, conn).await?;
Ok(())
}

/// Set the stamp_exception to only allow a subsequent request matching a specific route using the current security-stamp.

@@ -231,6 +235,15 @@ impl User {
pub fn reset_stamp_exception(&mut self) {
self.stamp_exception = None;
}

pub fn display_name(&self) -> &str {
// default to email if name is empty
if !&self.name.is_empty() {
&self.name
} else {
&self.email
}
}
}

/// Database methods
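Because a security-stamp reset now also rotates every device's refresh token (see the Device change above), both methods became async and take a connection. The new call shape, sketched (a hypothetical wrapper; crate-internal types assumed in scope, and save as used elsewhere in these models):

async fn change_password(user: &mut User, new_password: &str, conn: &DbConn) -> EmptyResult {
    // Resets the stamp and, through it, rotates all refresh tokens,
    // so existing sessions cannot silently re-authenticate.
    user.set_password(new_password, None, true, None, conn).await?;
    user.save(conn).await
}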
@@ -265,6 +265,7 @@ table! {
auth_response -> Nullable<Text>,
created_at -> Timestamp,
updated_at -> Timestamp,
binding_hash -> Nullable<Text>,
}
}

@@ -341,6 +342,16 @@ table! {
}
}

table! {
archives (user_uuid, cipher_uuid) {
user_uuid -> Text,
cipher_uuid -> Text,
archived_at -> Timestamp,
}
}

joinable!(archives -> users (user_uuid));
joinable!(archives -> ciphers (cipher_uuid));
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));

@@ -372,6 +383,7 @@ joinable!(auth_requests -> users (user_uuid));
joinable!(sso_users -> users (user_uuid));

allow_tables_to_appear_in_same_query!(
archives,
attachments,
ciphers,
ciphers_collections,
+290 -37
@@ -1,12 +1,11 @@
use std::{
fmt,
net::{IpAddr, SocketAddr},
str::FromStr,
sync::{Arc, LazyLock, Mutex},
time::Duration,
};

use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver};
use hickory_resolver::{net::runtime::TokioRuntimeProvider, TokioResolver};
use regex::Regex;
use reqwest::{
dns::{Name, Resolve, Resolving},

@@ -59,16 +58,6 @@ pub fn get_reqwest_client_builder() -> ClientBuilder {
.timeout(Duration::from_secs(10))
}

pub fn should_block_address(domain_or_ip: &str) -> bool {
if let Ok(ip) = IpAddr::from_str(domain_or_ip) {
if should_block_ip(ip) {
return true;
}
}

should_block_address_regex(domain_or_ip)
}

fn should_block_ip(ip: IpAddr) -> bool {
if !CONFIG.http_request_block_non_global_ips() {
return false;

@@ -100,11 +89,54 @@ fn should_block_address_regex(domain_or_ip: &str) -> bool {
is_match
}

fn should_block_host(host: &Host<&str>) -> Result<(), CustomHttpClientError> {
pub fn get_valid_host(host: &str) -> Result<Host, CustomHttpClientError> {
let Ok(host) = Host::parse(host) else {
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
};

// Some extra checks to validate hosts
match host {
Host::Domain(ref domain) => {
// Host::parse() does not verify length or all possible invalid characters
// We do some extra checks here to prevent issues
if domain.len() > 253 {
debug!("Domain validation error: '{domain}' exceeds 253 characters");
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
}
if !domain.split('.').all(|label| {
!label.is_empty()
// Labels can't be longer than 63 chars
&& label.len() <= 63
// Labels are not allowed to start or end with a hyphen `-`
&& !label.starts_with('-')
&& !label.ends_with('-')
// Only ASCII Alphanumeric characters are allowed
// We already received a punycoded domain back, so no unicode should exists here
&& label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
}) {
debug!(
"Domain validation error: '{domain}' labels contain invalid characters or exceed the maximum length"
);
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
}
}
Host::Ipv4(_) | Host::Ipv6(_) => {}
}

Ok(host)
}

pub fn should_block_host<S: AsRef<str>>(host: &Host<S>) -> Result<(), CustomHttpClientError> {
let (ip, host_str): (Option<IpAddr>, String) = match host {
Host::Ipv4(ip) => (Some(IpAddr::V4(*ip)), ip.to_string()),
Host::Ipv6(ip) => (Some(IpAddr::V6(*ip)), ip.to_string()),
Host::Domain(d) => (None, (*d).to_string()),
Host::Domain(d) => (None, d.as_ref().to_string()),
};

if let Some(ip) = ip {

@@ -134,6 +166,9 @@ pub enum CustomHttpClientError {
domain: Option<String>,
ip: IpAddr,
},
Invalid {
domain: String,
},
}

impl CustomHttpClientError {

@@ -155,7 +190,7 @@ impl fmt::Display for CustomHttpClientError {
match self {
Self::Blocked {
domain,
} => write!(f, "Blocked domain: {domain} matched HTTP_REQUEST_BLOCK_REGEX"),
} => write!(f, "Blocked domain: '{domain}' matched HTTP_REQUEST_BLOCK_REGEX"),
Self::NonGlobalIp {
domain: Some(domain),
ip,

@@ -163,7 +198,10 @@ impl fmt::Display for CustomHttpClientError {
Self::NonGlobalIp {
domain: None,
ip,
} => write!(f, "IP {ip} is not a global IP!"),
} => write!(f, "IP '{ip}' is not a global IP!"),
Self::Invalid {
domain,
} => write!(f, "Invalid host: '{domain}' contains invalid characters or exceeds the maximum length"),
}
}
}

@@ -184,40 +222,46 @@ impl CustomDnsResolver {
}

fn new() -> Arc<Self> {
match TokioResolver::builder(TokioConnectionProvider::default()) {
Ok(mut builder) => {
if CONFIG.dns_prefer_ipv6() {
builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4;
TokioResolver::builder(TokioRuntimeProvider::default())
.and_then(|mut builder| {
// Hickory's default since v0.26 is `Ipv6AndIpv4`, which sorts IPv6 first
// This might cause issues on IPv4 only systems or containers
// Unless someone enabled DNS_PREFER_IPV6, use Ipv4AndIpv6, which returns IPv4 first which was our previous default
if !CONFIG.dns_prefer_ipv6() {
builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6;
}
let resolver = builder.build();
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
builder.build()
})
.inspect_err(|e| warn!("Error creating Hickory resolver, falling back to default: {e:?}"))
.map(|resolver| Arc::new(Self::Hickory(Arc::new(resolver))))
.unwrap_or_else(|_| Arc::new(Self::Default()))
}

// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
async fn resolve_domain(&self, name: &str) -> Result<Vec<SocketAddr>, BoxError> {
pre_resolve(name)?;

let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
let results: Vec<SocketAddr> = match self {
Self::Default() => tokio::net::lookup_host((name, 0)).await?.collect(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().map(|i| SocketAddr::new(i, 0)).collect(),
};

if let Some(addr) = &result {
for addr in &results {
post_resolve(name, addr.ip())?;
}

Ok(result)
Ok(results)
}
}

fn pre_resolve(name: &str) -> Result<(), CustomHttpClientError> {
if should_block_address(name) {
let Ok(host) = get_valid_host(name) else {
return Err(CustomHttpClientError::Invalid {
domain: name.to_string(),
});
};

if should_block_host(&host).is_err() {
return Err(CustomHttpClientError::Blocked {
domain: name.to_string(),
});

@@ -242,8 +286,11 @@ impl Resolve for CustomDnsResolver {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
let results = this.resolve_domain(name).await?;
if results.is_empty() {
warn!("Unable to resolve {name} to any valid IP address");
}
Ok::<reqwest::dns::Addrs, _>(Box::new(results.into_iter()))
})
}
}

@@ -305,3 +352,209 @@ pub(crate) mod aws {
}
}
}
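resolve_domain now returns every resolved address instead of only the first, letting reqwest try the next address when a connection attempt fails. The std/tokio shape of that change, standalone (tokio runtime assumed; the host name is illustrative):

use std::net::SocketAddr;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // `lookup_host` with a (host, port) pair yields every resolved address,
    // not just the first; collecting them preserves the fallback order.
    let addrs: Vec<SocketAddr> = tokio::net::lookup_host(("example.com", 0)).await?.collect();
    println!("resolved {} address(es): {addrs:?}", addrs.len());
    Ok(())
}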
#[cfg(test)]
mod tests {
use super::*;
use crate::util::is_global_hardcoded;
use std::net::Ipv4Addr;
use url::Host;

// ===
// IPv4 numeric-format normalization
fn parse_to_ip(s: &str) -> Option<IpAddr> {
match Host::parse(s).ok()? {
Host::Ipv4(v4) => Some(IpAddr::V4(v4)),
Host::Ipv6(v6) => Some(IpAddr::V6(v6)),
Host::Domain(_) => None,
}
}

#[test]
fn dotted_decimal_loopback_normalizes() {
let ip = parse_to_ip("127.0.0.1").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn single_decimal_loopback_normalizes() {
// 127.0.0.1 == 2130706433
let ip = parse_to_ip("2130706433").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn hex_loopback_normalizes() {
let ip = parse_to_ip("0x7f000001").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn dotted_hex_loopback_normalizes() {
let ip = parse_to_ip("0x7f.0.0.1").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn octal_loopback_normalizes() {
// 017700000001 == 127.0.0.1
let ip = parse_to_ip("017700000001").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn dotted_octal_loopback_normalizes() {
let ip = parse_to_ip("0177.0.0.01").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn aws_metadata_decimal_blocked() {
// 169.254.169.254 == 2852039166 (link-local, AWS IMDS)
let ip = parse_to_ip("2852039166").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(169, 254, 169, 254)));
assert!(!is_global_hardcoded(ip));
}

#[test]
fn rfc1918_hex_blocked() {
// 10.0.0.1
let ip = parse_to_ip("0x0a000001").unwrap();
assert!(!is_global_hardcoded(ip));
}

#[test]
fn public_ip_decimal_allowed() {
// 8.8.8.8 == 134744072
let ip = parse_to_ip("134744072").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)));
assert!(is_global_hardcoded(ip));
}

// ===
// get_valid_host integration: numeric forms become Host::Ipv4
#[test]
fn get_valid_host_normalizes_decimal_int() {
let h = get_valid_host("2130706433").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}

#[test]
fn get_valid_host_normalizes_hex() {
let h = get_valid_host("0x7f000001").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}

#[test]
fn get_valid_host_normalizes_octal() {
let h = get_valid_host("017700000001").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}

// ===
// IPv6 formats
#[test]
fn ipv6_loopback_blocked() {
let h = get_valid_host("[::1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}

#[test]
fn ipv4_mapped_in_ipv6_loopback_blocked() {
// ::ffff:127.0.0.1 — v4-mapped form; is_global_hardcoded blocks via ::ffff:0:0/96
let h = get_valid_host("[::ffff:127.0.0.1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}

#[test]
fn ipv6_unique_local_blocked() {
let h = get_valid_host("[fc00::1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}

// ===
// Punycode / IDN
#[test]
fn punycode_passthrough() {
let h = get_valid_host("xn--deadbeafcaf-lbb.test").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "xn--deadbeafcaf-lbb.test"),
_ => panic!("expected domain"),
}
}

#[test]
fn idn_unicode_gets_punycoded() {
let h = get_valid_host("deadbeafcafé.test").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "xn--deadbeafcaf-lbb.test"),
_ => panic!("expected domain"),
}
}

#[test]
fn idn_unicode_gets_punycoded_tld() {
let h = get_valid_host("deadbeaf.café").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "deadbeaf.xn--caf-dma"),
_ => panic!("expected domain"),
}
}

#[test]
fn idn_emoji_gets_punycoded() {
let h = get_valid_host("xn--t88h.test").expect("valid"); // 🛡️.test
match h {
Host::Domain(d) => assert_eq!(d, "xn--t88h.test"),
_ => panic!("expected domain"),
}
}

#[test]
fn idn_unicode_to_punycode_roundtrip() {
let from_unicode = get_valid_host("🛡️.test").expect("valid");
let from_puny = get_valid_host("xn--t88h.test").expect("valid");
match (from_unicode, from_puny) {
(Host::Domain(a), Host::Domain(b)) => assert_eq!(a, b),
_ => panic!("expected domains"),
}
}

#[test]
fn invalid_punycode_rejected() {
// bare invalid punycode
assert!(get_valid_host("xn--").is_err());
}

#[test]
fn underscore_in_label_rejected() {
assert!(get_valid_host("dead_beaf.cafe").is_err());
}

#[test]
fn label_too_long_rejected() {
let label = "a".repeat(64);
assert!(get_valid_host(&format!("{label}.test")).is_err());
}

#[test]
fn domain_too_long_rejected() {
let big = "a.".repeat(130) + "test"; // > 253
assert!(get_valid_host(&big).is_err());
}
}
+3 -3
@@ -302,10 +302,10 @@ pub async fn send_invite(
.append_pair("organizationUserId", &member_id)
.append_pair("token", &invite_token);

if CONFIG.sso_enabled() {
query_params.append_pair("orgUserHasExistingUser", "false");
if CONFIG.sso_enabled() && CONFIG.sso_only() {
query_params.append_pair("orgSsoIdentifier", &org_id);
} else if user.private_key.is_some() {
}
if user.private_key.is_some() {
query_params.append_pair("orgUserHasExistingUser", "true");
}
}
+37 -6
@@ -126,7 +126,7 @@ fn parse_args() {
exit(0);
} else if pargs.contains(["-v", "--version"]) {
config::SKIP_CONFIG_VALIDATION.store(true, Ordering::Relaxed);
let web_vault_version = util::get_web_vault_version();
let web_vault_version = util::get_active_web_release();
println!("Vaultwarden {version}");
println!("Web-Vault {web_vault_version}");
exit(0);

@@ -558,6 +558,12 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
let basepath = &CONFIG.domain_path();

let mut config = rocket::Config::from(rocket::Config::figment());

// We install our own signal handlers below; disable Rocket's built-in handlers
config.shutdown.ctrlc = false;
#[cfg(unix)]
config.shutdown.signals.clear();

config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into();
config.cli_colors = false; // Make sure Rocket does not color any values for logging.
config.limits = Limits::new()

@@ -589,11 +595,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>

CONFIG.set_rocket_shutdown_handle(instance.shutdown());

tokio::spawn(async move {
tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler");
info!("Exiting Vaultwarden!");
CONFIG.shutdown();
});
spawn_shutdown_signal_handler();

#[cfg(all(unix, sqlite))]
{

@@ -621,6 +623,35 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
Ok(())
}

#[cfg(unix)]
fn spawn_shutdown_signal_handler() {
tokio::spawn(async move {
use tokio::signal::unix::signal;

let mut sigint = signal(SignalKind::interrupt()).expect("Error setting SIGINT handler");
let mut sigterm = signal(SignalKind::terminate()).expect("Error setting SIGTERM handler");
let mut sigquit = signal(SignalKind::quit()).expect("Error setting SIGQUIT handler");

let signal_name = tokio::select! {
_ = sigint.recv() => "SIGINT",
_ = sigterm.recv() => "SIGTERM",
_ = sigquit.recv() => "SIGQUIT",
};

info!("Received {signal_name}, initiating graceful shutdown");
CONFIG.shutdown();
});
}

#[cfg(not(unix))]
fn spawn_shutdown_signal_handler() {
tokio::spawn(async move {
tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler");
info!("Received Ctrl-C, initiating graceful shutdown");
CONFIG.shutdown();
});
}

fn schedule_jobs(pool: db::DbPool) {
if CONFIG.job_poll_interval_ms() == 0 {
info!("Job scheduler disabled.");
+4
-3
@@ -17,7 +17,7 @@ use crate::{
    CONFIG,
};

pub static FAKE_IDENTIFIER: &str = "VW_DUMMY_IDENTIFIER_FOR_OIDC";
pub static FAKE_SSO_IDENTIFIER: &str = "00000000-01DC-01DC-01DC-000000000000";

static SSO_JWT_ISSUER: LazyLock<String> = LazyLock::new(|| format!("{}|sso", CONFIG.domain_origin()));

@@ -188,6 +188,7 @@ pub async fn authorize_url(
    client_challenge: OIDCCodeChallenge,
    client_id: &str,
    raw_redirect_uri: &str,
    binding_hash: Option<String>,
    conn: DbConn,
) -> ApiResult<Url> {
    let redirect_uri = match client_id {
@@ -203,7 +204,7 @@ pub async fn authorize_url(
        _ => err!(format!("Unsupported client {client_id}")),
    };

    let (auth_url, sso_auth) = Client::authorize_url(state, client_challenge, redirect_uri).await?;
    let (auth_url, sso_auth) = Client::authorize_url(state, client_challenge, redirect_uri, binding_hash).await?;
    sso_auth.save(&conn).await?;
    Ok(auth_url)
}
@@ -283,7 +284,7 @@ pub async fn exchange_code(

    let email_verified = id_claims.email_verified().or(user_info.email_verified());

    let user_name = id_claims.preferred_username().map(|un| un.to_string());
    let user_name = id_claims.preferred_username().or(user_info.preferred_username()).map(|un| un.to_string());

    let refresh_token = token_response.refresh_token().map(|t| t.secret());
    if refresh_token.is_none() && CONFIG.sso_scopes_vec().contains(&"offline_access".to_string()) {

+32
-19
@@ -1,6 +1,5 @@
use std::{borrow::Cow, sync::LazyLock, time::Duration};

use mini_moka::sync::Cache;
use openidconnect::{core::*, reqwest, *};
use regex::Regex;
use url::Url;
@@ -13,9 +12,14 @@ use crate::{
};

static CLIENT_CACHE_KEY: LazyLock<String> = LazyLock::new(|| "sso-client".to_string());
static CLIENT_CACHE: LazyLock<Cache<String, Client>> = LazyLock::new(|| {
    Cache::builder().max_capacity(1).time_to_live(Duration::from_secs(CONFIG.sso_client_cache_expiration())).build()
static CLIENT_CACHE: LazyLock<moka::sync::Cache<String, Client>> = LazyLock::new(|| {
    moka::sync::Cache::builder()
        .max_capacity(1)
        .time_to_live(Duration::from_secs(CONFIG.sso_client_cache_expiration()))
        .build()
});
static REFRESH_CACHE: LazyLock<moka::future::Cache<String, Result<RefreshTokenResponse, String>>> =
    LazyLock::new(|| moka::future::Cache::builder().max_capacity(1000).time_to_live(Duration::from_secs(30)).build());

/// OpenID Connect Core client.
pub type CustomClient = openidconnect::Client<
@@ -38,6 +42,8 @@ pub type CustomClient = openidconnect::Client<
    EndpointSet,
>;

pub type RefreshTokenResponse = (Option<String>, String, Option<Duration>);

#[derive(Clone)]
pub struct Client {
    pub http_client: reqwest::Client,
@@ -111,6 +117,7 @@ impl Client {
        state: OIDCState,
        client_challenge: OIDCCodeChallenge,
        redirect_uri: String,
        binding_hash: Option<String>,
    ) -> ApiResult<(Url, SsoAuth)> {
        let scopes = CONFIG.sso_scopes_vec().into_iter().map(Scope::new);
        let base64_state = data_encoding::BASE64.encode(state.to_string().as_bytes());
@@ -133,7 +140,7 @@
        }

        let (auth_url, _, nonce) = auth_req.url();
        Ok((auth_url, SsoAuth::new(state, client_challenge, nonce.secret().clone(), redirect_uri)))
        Ok((auth_url, SsoAuth::new(state, client_challenge, nonce.secret().clone(), redirect_uri, binding_hash)))
    }

    pub async fn exchange_code(
@@ -231,23 +238,29 @@
        verifier
    }

    pub async fn exchange_refresh_token(
        refresh_token: String,
    ) -> ApiResult<(Option<String>, String, Option<Duration>)> {
    pub async fn exchange_refresh_token(refresh_token: String) -> ApiResult<RefreshTokenResponse> {
        let client = Client::cached().await?;

        REFRESH_CACHE
            .get_with(refresh_token.clone(), async move { client._exchange_refresh_token(refresh_token).await })
            .await
            .map_err(Into::into)
    }

    async fn _exchange_refresh_token(&self, refresh_token: String) -> Result<RefreshTokenResponse, String> {
        let rt = RefreshToken::new(refresh_token);

        let client = Client::cached().await?;
        let token_response =
            match client.core_client.exchange_refresh_token(&rt).request_async(&client.http_client).await {
                Err(err) => err!(format!("Request to exchange_refresh_token endpoint failed: {:?}", err)),
                Ok(token_response) => token_response,
            };

        Ok((
            token_response.refresh_token().map(|token| token.secret().clone()),
            token_response.access_token().secret().clone(),
            token_response.expires_in(),
        ))
        match self.core_client.exchange_refresh_token(&rt).request_async(&self.http_client).await {
            Err(err) => {
                error!("Request to exchange_refresh_token endpoint failed: {err}");
                Err(format!("Request to exchange_refresh_token endpoint failed: {err}"))
            }
            Ok(token_response) => Ok((
                token_response.refresh_token().map(|token| token.secret().clone()),
                token_response.access_token().secret().clone(),
                token_response.expires_in(),
            )),
        }
    }
}

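One detail worth calling out in the REFRESH_CACHE change above: moka's get_with coalesces concurrent lookups for the same key, so parallel requests refreshing the same token should hit the IdP only once within the 30-second TTL. A simplified, self-contained sketch of that behavior (string key and value types are stand-ins for the real types):

use std::time::Duration;

// Simplified stand-in for the pattern above: only one of the concurrent
// callers runs the init future; the other awaits and reuses its result.
#[tokio::main]
async fn main() {
    let cache: moka::future::Cache<String, String> =
        moka::future::Cache::builder().max_capacity(1000).time_to_live(Duration::from_secs(30)).build();

    let a = cache.get_with("refresh-token-1".to_string(), async {
        // Imagine the network call to the IdP here; it runs exactly once.
        "access-token".to_string()
    });
    let b = cache.get_with("refresh-token-1".to_string(), async {
        // Typically never runs: the call above already owns the init slot.
        "other".to_string()
    });

    let (a, b) = tokio::join!(a, b);
    assert_eq!(a, b);
}

Caching the Result, as the code above does, also means a failed refresh is remembered for the TTL instead of being retried by every concurrent client.
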
@@ -111,7 +111,8 @@
      "microsoftstore.com",
      "xbox.com",
      "azure.com",
      "windowsazure.com"
      "windowsazure.com",
      "cloud.microsoft"
    ],
    "excluded": false
  },
@@ -971,5 +972,13 @@
      "pinterest.se"
    ],
    "excluded": false
  },
  {
    "type": 91,
    "domains": [
      "twitter.com",
      "x.com"
    ],
    "excluded": false
  }
]
]

Vendored
+13
-2
@@ -1,6 +1,17 @@
body {
  padding-top: 75px;
}
/* Some extra width's for the main layout */
@media (min-width: 1600px) {
  .container-xxl {
    max-width: 1520px;
  }
}
@media (min-width: 1800px) {
  .container-xxl {
    max-width: 1720px;
  }
}
img {
  width: 48px;
  height: 48px;
@@ -38,8 +49,8 @@ img {
  max-width: 130px;
}
#users-table .vw-actions, #orgs-table .vw-actions {
  min-width: 155px;
  max-width: 160px;
  min-width: 170px;
  max-width: 180px;
}
#users-table .vw-org-cell {
  max-height: 120px;

+13
-6
@@ -29,7 +29,7 @@ function isValidIp(ip) {
    return ipv4Regex.test(ip) || ipv6Regex.test(ip);
}

function checkVersions(platform, installed, latest, commit=null, pre_release=false) {
function checkVersions(platform, installed, latest, commit=null, compare_order=0) {
    if (installed === "-" || latest === "-") {
        document.getElementById(`${platform}-failed`).classList.remove("d-none");
        return;
@@ -37,7 +37,7 @@ function checkVersions(platform, installed, latest, commit=null, pre_release=fal

    // Only check basic versions, no commit revisions
    if (commit === null || installed.indexOf("-") === -1) {
        if (platform === "web" && pre_release === true) {
        if (platform === "web" && compare_order === 1) {
            document.getElementById(`${platform}-prerelease`).classList.remove("d-none");
        } else if (installed == latest) {
            document.getElementById(`${platform}-success`).classList.remove("d-none");
@@ -83,7 +83,7 @@ async function generateSupportString(event, dj) {
    let supportString = "### Your environment (Generated via diagnostics page)\n\n";

    supportString += `* Vaultwarden version: v${dj.current_release}\n`;
    supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
    supportString += `* Web-vault version: v${dj.active_web_release}\n`;
    supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
    supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`;
    supportString += `* Database type: ${dj.db_type}\n`;
@@ -109,6 +109,9 @@ async function generateSupportString(event, dj) {
        supportString += "* Websocket Check: disabled\n";
    }
    supportString += `* HTTP Response Checks: ${httpResponseCheck}\n`;
    if (dj.invalid_feature_flags != "") {
        supportString += `* Invalid feature flags: true\n`;
    }

    const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, {
        "headers": { "Accept": "application/json" }
@@ -128,6 +131,10 @@ async function generateSupportString(event, dj) {
        supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
    }

    if (dj.invalid_feature_flags != "") {
        supportString += `\n**Invalid feature flags:** ${dj.invalid_feature_flags}\n`;
    }

    // Add http response check messages if they exist
    if (httpResponseCheck === false) {
        supportString += "\n**Failed HTTP Checks:**\n";
@@ -208,9 +215,9 @@ function initVersionCheck(dj) {
    }
    checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);

    const webInstalled = dj.web_vault_version;
    const webLatest = dj.latest_web_build;
    checkVersions("web", webInstalled, webLatest, null, dj.web_vault_pre_release);
    const webInstalled = dj.active_web_release;
    const webLatest = dj.latest_web_release;
    checkVersions("web", webInstalled, webLatest, null, dj.web_vault_compare);
}

function checkDns(dns_resolved) {

Vendored
+56
-55
@@ -4,10 +4,10 @@
 *
 * To rebuild or modify this file with the latest versions of the included
 * software please visit:
 *   https://datatables.net/download/#bs5/dt-2.3.5
 *   https://datatables.net/download/#bs5/dt-2.3.7
 *
 * Included libraries:
 *   DataTables 2.3.5
 *   DataTables 2.3.7
 */

:root {
@@ -88,42 +88,42 @@ table.dataTable thead > tr > th:active,
table.dataTable thead > tr > td:active {
  outline: none;
}
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before {
table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:before {
  position: absolute;
  display: block;
  bottom: 50%;
  content: "\25B2";
  content: "\25B2"/"";
}
table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:after {
  position: absolute;
  display: block;
  top: 50%;
  content: "\25BC";
  content: "\25BC"/"";
}
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order {
table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order, table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order {
  position: relative;
  width: 12px;
  height: 24px;
  height: 20px;
}
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order:after, table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:after {
  left: 0;
  opacity: 0.125;
  line-height: 9px;
@@ -140,15 +140,15 @@ table.dataTable thead > tr > td.dt-orderable-desc:hover {
  outline: 2px solid rgba(0, 0, 0, 0.05);
  outline-offset: -2px;
}
table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:after {
  opacity: 0.6;
}
table.dataTable thead > tr > th.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) span.dt-column-order:empty, table.dataTable thead > tr > th.sorting_desc_disabled span.dt-column-order:after, table.dataTable thead > tr > th.sorting_asc_disabled span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) span.dt-column-order:empty,
table.dataTable thead > tr > td.sorting_desc_disabled span.dt-column-order:after,
table.dataTable thead > tr > td.sorting_asc_disabled span.dt-column-order:before {
table.dataTable thead > tr > th.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) .dt-column-order:empty, table.dataTable thead > tr > th.sorting_desc_disabled .dt-column-order:after, table.dataTable thead > tr > th.sorting_asc_disabled .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) .dt-column-order:empty,
table.dataTable thead > tr > td.sorting_desc_disabled .dt-column-order:after,
table.dataTable thead > tr > td.sorting_asc_disabled .dt-column-order:before {
  display: none;
}
table.dataTable thead > tr > th:active,
@@ -169,24 +169,24 @@ table.dataTable tfoot > tr > td div.dt-column-footer {
  align-items: var(--dt-header-align-items);
  gap: 4px;
}
table.dataTable thead > tr > th div.dt-column-header span.dt-column-title,
table.dataTable thead > tr > th div.dt-column-footer span.dt-column-title,
table.dataTable thead > tr > td div.dt-column-header span.dt-column-title,
table.dataTable thead > tr > td div.dt-column-footer span.dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-header span.dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-footer span.dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-header span.dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-footer span.dt-column-title {
table.dataTable thead > tr > th div.dt-column-header .dt-column-title,
table.dataTable thead > tr > th div.dt-column-footer .dt-column-title,
table.dataTable thead > tr > td div.dt-column-header .dt-column-title,
table.dataTable thead > tr > td div.dt-column-footer .dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-header .dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-footer .dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-header .dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-footer .dt-column-title {
  flex-grow: 1;
}
table.dataTable thead > tr > th div.dt-column-header span.dt-column-title:empty,
table.dataTable thead > tr > th div.dt-column-footer span.dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-header span.dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-footer span.dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-header span.dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-footer span.dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-header span.dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-footer span.dt-column-title:empty {
table.dataTable thead > tr > th div.dt-column-header .dt-column-title:empty,
table.dataTable thead > tr > th div.dt-column-footer .dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-header .dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-footer .dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-header .dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-footer .dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-header .dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-footer .dt-column-title:empty {
  display: none;
}

@@ -588,16 +588,16 @@ table.dataTable.table-sm > thead > tr td.dt-ordering-asc,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc {
  padding-right: 0.25rem;
}
table.dataTable.table-sm > thead > tr th.dt-orderable-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc span.dt-column-order {
table.dataTable.table-sm > thead > tr th.dt-orderable-asc .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc .dt-column-order {
  right: 0.25rem;
}
table.dataTable.table-sm > thead > tr th.dt-type-date span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-type-numeric span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-date span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-numeric span.dt-column-order {
table.dataTable.table-sm > thead > tr th.dt-type-date .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-type-numeric .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-date .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-numeric .dt-column-order {
  left: 0.25rem;
}

@@ -606,7 +606,8 @@ div.dt-scroll-head table.table-bordered {
}

div.table-responsive > div.dt-container > div.row {
  margin: 0;
  margin-left: 0;
  margin-right: 0;
}
div.table-responsive > div.dt-container > div.row > div[class^=col-]:first-child {
  padding-left: 0;

Vendored
+56
-28
@@ -4,13 +4,13 @@
 *
 * To rebuild or modify this file with the latest versions of the included
 * software please visit:
 *   https://datatables.net/download/#bs5/dt-2.3.5
 *   https://datatables.net/download/#bs5/dt-2.3.7
 *
 * Included libraries:
 *   DataTables 2.3.5
 *   DataTables 2.3.7
 */

/*! DataTables 2.3.5
/*! DataTables 2.3.7
 * © SpryMedia Ltd - datatables.net/license
 */

@@ -186,7 +186,7 @@
    "sDestroyWidth": $this[0].style.width,
    "sInstance": sId,
    "sTableId": sId,
    colgroup: $('<colgroup>').prependTo(this),
    colgroup: $('<colgroup>'),
    fastData: function (row, column, type) {
        return _fnGetCellData(oSettings, row, column, type);
    }
@@ -259,6 +259,7 @@
    "orderHandler",
    "titleRow",
    "typeDetect",
    "columnTitleTag",
    [ "iCookieDuration", "iStateDuration" ], // backwards compat
    [ "oSearch", "oPreviousSearch" ],
    [ "aoSearchCols", "aoPreSearchCols" ],
@@ -423,7 +424,7 @@

    if ( oSettings.caption ) {
        if ( caption.length === 0 ) {
            caption = $('<caption/>').appendTo( $this );
            caption = $('<caption/>').prependTo( $this );
        }

        caption.html( oSettings.caption );
@@ -436,6 +437,14 @@
        oSettings.captionNode = caption[0];
    }

    // Place the colgroup element in the correct location for the HTML structure
    if (caption.length) {
        oSettings.colgroup.insertAfter(caption);
    }
    else {
        oSettings.colgroup.prependTo(oSettings.nTable);
    }

    if ( thead.length === 0 ) {
        thead = $('<thead/>').appendTo($this);
    }
@@ -516,7 +525,7 @@
    *
    * @type string
    */
    builder: "bs5/dt-2.3.5",
    builder: "bs5/dt-2.3.7",

    /**
    * Buttons. For use with the Buttons extension for DataTables. This is
@@ -1292,7 +1301,7 @@
};

// Replaceable function in api.util
var _stripHtml = function (input) {
var _stripHtml = function (input, replacement) {
    if (! input || typeof input !== 'string') {
        return input;
    }
@@ -1304,7 +1313,7 @@

    var previous;

    input = input.replace(_re_html, ''); // Complete tags
    input = input.replace(_re_html, replacement || ''); // Complete tags

    // Safety for incomplete script tag - use do / while to ensure that
    // we get all instances
@@ -1769,7 +1778,7 @@
    }
},

stripHtml: function (mixed) {
stripHtml: function (mixed, replacement) {
    var type = typeof mixed;

    if (type === 'function') {
@@ -1777,7 +1786,7 @@
        return;
    }
    else if (type === 'string') {
        return _stripHtml(mixed);
        return _stripHtml(mixed, replacement);
    }
    return mixed;
},
@@ -3379,7 +3388,7 @@
        colspan++;
    }

    var titleSpan = $('span.dt-column-title', cell);
    var titleSpan = $('.dt-column-title', cell);

    structure[row][column] = {
        cell: cell,
@@ -4093,8 +4102,8 @@
    }

    // Wrap the column title so we can write to it in future
    if ( $('span.dt-column-title', cell).length === 0) {
        $('<span>')
    if ( $('.dt-column-title', cell).length === 0) {
        $(document.createElement(settings.columnTitleTag))
            .addClass('dt-column-title')
            .append(cell.childNodes)
            .appendTo(cell);
@@ -4105,9 +4114,9 @@
        isHeader &&
        jqCell.filter(':not([data-dt-order=disable])').length !== 0 &&
        jqCell.parent(':not([data-dt-order=disable])').length !== 0 &&
        $('span.dt-column-order', cell).length === 0
        $('.dt-column-order', cell).length === 0
    ) {
        $('<span>')
        $(document.createElement(settings.columnTitleTag))
            .addClass('dt-column-order')
            .appendTo(cell);
    }
@@ -4116,7 +4125,7 @@
    // layout for those elements
    var headerFooter = isHeader ? 'header' : 'footer';

    if ( $('span.dt-column-' + headerFooter, cell).length === 0) {
    if ( $('div.dt-column-' + headerFooter, cell).length === 0) {
        $('<div>')
            .addClass('dt-column-' + headerFooter)
            .append(cell.childNodes)
@@ -4273,6 +4282,10 @@
    // Custom Ajax option to submit the parameters as a JSON string
    if (baseAjax.submitAs === 'json' && typeof data === 'object') {
        baseAjax.data = JSON.stringify(data);

        if (!baseAjax.contentType) {
            baseAjax.contentType = 'application/json; charset=utf-8';
        }
    }

    if (typeof ajax === 'function') {
@@ -5531,7 +5544,7 @@
    var autoClass = _ext.type.className[column.sType];
    var padding = column.sContentPadding || (scrollX ? '-' : '');
    var text = longest + padding;
    var insert = longest.indexOf('<') === -1
    var insert = longest.indexOf('<') === -1 && longest.indexOf('&') === -1
        ? document.createTextNode(text)
        : text

@@ -5719,15 +5732,20 @@
        .replace(/id=".*?"/g, '')
        .replace(/name=".*?"/g, '');

    var s = _stripHtml(cellString)
    // Don't want Javascript at all in these calculation cells.
    cellString = cellString.replace(/<script.*?<\/script>/gi, ' ');

    var noHtml = _stripHtml(cellString, ' ')
        .replace( /&nbsp;/g, ' ' );

    // The length is calculated on the text only, but we keep the HTML
    // in the string so it can be used in the calculation table
    collection.push({
        str: s,
        len: s.length
        str: cellString,
        len: noHtml.length
    });

    allStrings.push(s);
    allStrings.push(noHtml);
}

// Order and then cut down to the size we need
@@ -8782,7 +8800,7 @@
    // Automatic - find the _last_ unique cell from the top that is not empty (last for
    // backwards compatibility)
    for (var i=0 ; i<header.length ; i++) {
        if (header[i][column].unique && $('span.dt-column-title', header[i][column].cell).text()) {
        if (header[i][column].unique && $('.dt-column-title', header[i][column].cell).text()) {
            target = i;
        }
    }
@@ -8878,6 +8896,10 @@
        return null;
    }

    if (col.responsiveVisible === false) {
        return null;
    }

    // Selector
    if (match[1]) {
        return $(nodes[idx]).filter(match[1]).length > 0 ? idx : null;
@@ -9089,7 +9111,7 @@
        title = undefined;
    }

    var span = $('span.dt-column-title', this.column(column).header(row));
    var span = $('.dt-column-title', this.column(column).header(row));

    if (title !== undefined) {
        span.html(title);
@@ -10263,8 +10285,8 @@

    // Needed for header and footer, so pulled into its own function
    function cleanHeader(node, className) {
        $(node).find('span.dt-column-order').remove();
        $(node).find('span.dt-column-title').each(function () {
        $(node).find('.dt-column-order').remove();
        $(node).find('.dt-column-title').each(function () {
            var title = $(this).html();
            $(this).parent().parent().append(title);
            $(this).remove();
@@ -10282,7 +10304,7 @@
    * @type string
    * @default Version number
    */
    DataTable.version = "2.3.5";
    DataTable.version = "2.3.7";

    /**
    * Private data store, containing all of the settings objects that are
@@ -11450,7 +11472,10 @@
    iDeferLoading: null,

    /** Event listeners */
    on: null
    on: null,

    /** Title wrapper element type */
    columnTitleTag: 'span'
};

_fnHungarianMap( DataTable.defaults );
@@ -12414,7 +12439,10 @@
    orderHandler: true,

    /** Title row indicator */
    titleRow: null
    titleRow: null,

    /** Title wrapper element type */
    columnTitleTag: 'span'
};

/**

+1460
-1460
File diff suppressed because it is too large
+2182
-3943
File diff suppressed because it is too large
@@ -27,7 +27,7 @@
    </symbol>
</svg>
<nav class="navbar navbar-expand-md navbar-dark bg-dark mb-4 shadow fixed-top">
    <div class="container-xl">
    <div class="container-xxl">
        <a class="navbar-brand" href="{{urlpath}}/admin"><img class="vaultwarden-icon" src="{{urlpath}}/vw_static/vaultwarden-icon.png" alt="V">aultwarden Admin</a>
        <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarCollapse"
            aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">

@@ -1,4 +1,4 @@
<main class="container-xl">
<main class="container-xxl">
    <div id="diagnostics-block" class="my-3 p-3 rounded shadow">
        <h6 class="border-bottom pb-2 mb-2">Diagnostics</h6>

@@ -8,7 +8,7 @@
        <dl class="row">
            <dt class="col-sm-5">Server Installed
                <span class="badge bg-success d-none abbr-badge" id="server-success" title="Latest version is installed.">Ok</span>
                <span class="badge bg-warning text-dark d-none abbr-badge" id="server-warning" title="There seems to be an update available.">Update</span>
                <span class="badge bg-warning text-dark d-none abbr-badge" id="server-warning" title="An update is available.">Update</span>
                <span class="badge bg-info text-dark d-none abbr-badge" id="server-branch" title="This is a branched version.">Branched</span>
            </dt>
            <dd class="col-sm-7">
@@ -23,17 +23,17 @@
        {{#if page_data.web_vault_enabled}}
            <dt class="col-sm-5">Web Installed
                <span class="badge bg-success d-none abbr-badge" id="web-success" title="Latest version is installed.">Ok</span>
                <span class="badge bg-warning text-dark d-none abbr-badge" id="web-warning" title="There seems to be an update available.">Update</span>
                <span class="badge bg-info text-dark d-none abbr-badge" id="web-prerelease" title="You seem to be using a pre-release version.">Pre-Release</span>
                <span class="badge bg-warning text-dark d-none abbr-badge" id="web-warning" title="An update is available.">Update</span>
                <span class="badge bg-info text-dark d-none abbr-badge" id="web-prerelease" title="You are using a pre-release version.">Pre-Release</span>
            </dt>
            <dd class="col-sm-7">
                <span id="web-installed">{{page_data.web_vault_version}}</span>
                <span id="web-installed">{{page_data.active_web_release}}</span>
            </dd>
            <dt class="col-sm-5">Web Latest
                <span class="badge bg-secondary d-none abbr-badge" id="web-failed" title="Unable to determine latest version.">Unknown</span>
            </dt>
            <dd class="col-sm-7">
                <span id="web-latest">{{page_data.latest_web_build}}</span>
                <span id="web-latest">{{page_data.latest_web_release}}</span>
            </dd>
        {{/if}}
        {{#unless page_data.web_vault_enabled}}
@@ -194,6 +194,14 @@
            <dd class="col-sm-7">
                <span id="http-response-errors" class="d-block"></span>
            </dd>
            {{#if page_data.invalid_feature_flags}}
            <dt class="col-sm-5">Invalid Feature Flags
                <span class="badge bg-warning text-dark abbr-badge" id="feature-flag-warning" title="Some feature flags are invalid or outdated!">Warning</span>
            </dt>
            <dd class="col-sm-7">
                <span id="feature-flags" class="d-block"><b>Flags:</b> <span id="feature-flags-string">{{page_data.invalid_feature_flags}}</span></span>
            </dd>
            {{/if}}
        </dl>
    </div>
</div>

@@ -1,4 +1,4 @@
<main class="container-xl">
<main class="container-xxl">
    {{#if error}}
    <div class="align-items-center p-3 mb-3 text-opacity-50 text-dark bg-warning rounded shadow">
        <div>

@@ -1,4 +1,4 @@
<main class="container-xl">
<main class="container-xxl">
    <div id="organizations-block" class="my-3 p-3 rounded shadow">
        <h6 class="border-bottom pb-2 mb-3">Organizations</h6>
        <div class="table-responsive-xl small">
@@ -59,7 +59,7 @@
</main>

<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
<script src="{{urlpath}}/vw_static/jquery-4.0.0.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script>
<script src="{{urlpath}}/vw_static/admin_organizations.js"></script>
<script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>

@@ -1,4 +1,4 @@
<main class="container-xl">
<main class="container-xxl">
    <div id="admin_token_warning" class="alert alert-warning alert-dismissible fade show d-none">
        <button type="button" class="btn-close" data-bs-target="admin_token_warning" data-bs-dismiss="alert" aria-label="Close"></button>
        You are using a plain text `ADMIN_TOKEN` which is insecure.<br>

@@ -1,4 +1,4 @@
<main class="container-xl">
<main class="container-xxl">
    <div id="users-block" class="my-3 p-3 rounded shadow">
        <h6 class="border-bottom pb-2 mb-3">Registered Users</h6>
        <div class="table-responsive-xl small">
@@ -43,7 +43,7 @@
            </td>
            {{#if ../sso_enabled}}
            <td>
                <span class="d-block">{{sso_identifier}}</span>
                <span class="d-block text-break text-wrap">{{sso_identifier}}</span>
            </td>
            {{/if}}
            <td>
@@ -153,7 +153,7 @@
</main>

<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
<script src="{{urlpath}}/vw_static/jquery-4.0.0.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script>
<script src="{{urlpath}}/vw_static/admin_users.js"></script>
<script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>

@@ -137,6 +137,14 @@ bit-nav-logo bit-nav-item .bwi-shield {
app-user-layout app-danger-zone button:nth-child(1) {
    @extend %vw-hide;
}

/* Hide unsupported Forwarding email alias options */
ng-dropdown-panel div.ng-dropdown-panel-items div:has(> [title="Firefox Relay"]) {
    @extend %vw-hide;
}
ng-dropdown-panel div.ng-dropdown-panel-items div:has(> [title="DuckDuckGo"]) {
    @extend %vw-hide;
}
/**** END Static Vaultwarden Changes ****/
/**** START Dynamic Vaultwarden Changes ****/
{{#if signup_disabled}}
@@ -158,6 +166,13 @@ app-root a[routerlink="/signup"] {
{{/if}}
{{/if}}

{{#if remember_2fa_disabled}}
/* Hide checkbox to remember 2FA token for 30 days */
app-two-factor-auth > form > bit-form-control {
    @extend %vw-hide;
}
{{/if}}

{{#unless mail_2fa_enabled}}
/* Hide `Email` 2FA if mail is not enabled */
.providers-2fa-1 {
@@ -192,6 +207,19 @@ bit-nav-item[route="sends"] {
    @extend %vw-hide;
}
{{/unless}}

{{#unless password_hints_allowed}}
/* Hide password hints if not allowed */
a[routerlink="/hint"],
{{#if (webver "<2025.12.2")}}
app-change-password > form > .form-group:nth-child(5),
auth-input-password > form > bit-form-field:nth-child(4) {
{{else}}
.vw-password-hint {
{{/if}}
    @extend %vw-hide;
}
{{/unless}}
/**** End Dynamic Vaultwarden Changes ****/
/**** Include a special user stylesheet for custom changes ****/
{{#if load_user_scss}}

+58
-21
@@ -16,7 +16,10 @@ use tokio::{
    time::{sleep, Duration},
};

use crate::{config::PathType, CONFIG};
use crate::{
    config::{PathType, SUPPORTED_FEATURE_FLAGS},
    CONFIG,
};

pub struct AppHeaders();

@@ -153,9 +156,11 @@ impl Cors {
    fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option<String> {
        let origin = Cors::get_header(headers, "Origin");
        let safari_extension_origin = "file://";
        let desktop_custom_file_origin = "bw-desktop-file://bundle";

        if origin == CONFIG.domain_origin()
            || origin == safari_extension_origin
            || origin == desktop_custom_file_origin
            || (CONFIG.sso_enabled() && origin == CONFIG.sso_authority())
        {
            Some(origin)
@@ -531,7 +536,7 @@ struct WebVaultVersion {
    version: String,
}

pub fn get_web_vault_version() -> String {
pub fn get_active_web_release() -> String {
    let version_files = [
        format!("{}/vw-version.json", CONFIG.web_vault_folder()),
        format!("{}/version.json", CONFIG.web_vault_folder()),
@@ -629,6 +634,21 @@ fn _process_key(key: &str) -> String {
    }
}

pub fn deser_opt_nonempty_str<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
where
    D: Deserializer<'de>,
    T: From<String>,
{
    use serde::Deserialize;
    Ok(Option::<String>::deserialize(deserializer)?.and_then(|s| {
        if s.is_empty() {
            None
        } else {
            Some(T::from(s))
        }
    }))
}

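A quick illustration of how a helper like deser_opt_nonempty_str is typically wired up via serde's deserialize_with attribute; the Settings struct and field name are invented for the example and rely on the function defined above:

use serde::Deserialize;

// `deserialize_with` routes the field through the helper above, so an empty
// string deserializes to None instead of Some(""). Struct and field name are
// illustrative only.
#[derive(Debug, Deserialize)]
struct Settings {
    #[serde(default, deserialize_with = "deser_opt_nonempty_str")]
    sso_identifier: Option<String>,
}

fn main() {
    let s: Settings = serde_json::from_str(r#"{ "sso_identifier": "" }"#).unwrap();
    assert!(s.sso_identifier.is_none());
}
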
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum NumberOrString {
@@ -714,7 +734,7 @@ where

        warn!("Can't connect to database, retrying: {e:?}");

        sleep(Duration::from_millis(1_000)).await;
        sleep(Duration::from_secs(1)).await;
    }
}
}
@@ -763,21 +783,28 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
    }
}

pub enum FeatureFlagFilter {
    #[allow(dead_code)]
    Unfiltered,
    ValidOnly,
    InvalidOnly,
}

/// Parses the experimental client feature flags string into a HashMap.
pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags: &str) -> HashMap<String, bool> {
    // These flags could still be configured, but are deprecated and not used anymore.
    // To prevent old installations from failing to start, filter these out instead of erroring.
    const DEPRECATED_FLAGS: &[&str] =
        &["autofill-overlay", "autofill-v2", "browser-fileless-import", "extension-refresh", "fido2-vault-credentials"];
pub fn parse_experimental_client_feature_flags(
    experimental_client_feature_flags: &str,
    filter_mode: FeatureFlagFilter,
) -> HashMap<String, bool> {
    experimental_client_feature_flags
        .split(',')
        .filter_map(|f| {
            let flag = f.trim();
            if !flag.is_empty() && !DEPRECATED_FLAGS.contains(&flag) {
                return Some((flag.to_owned(), true));
            }
            None
        .map(str::trim)
        .filter(|flag| !flag.is_empty())
        .filter(|flag| match filter_mode {
            FeatureFlagFilter::Unfiltered => true,
            FeatureFlagFilter::ValidOnly => SUPPORTED_FEATURE_FLAGS.contains(flag),
            FeatureFlagFilter::InvalidOnly => !SUPPORTED_FEATURE_FLAGS.contains(flag),
        })
        .map(|flag| (flag.to_owned(), true))
        .collect()
}

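To make the new filter_mode parameter concrete, a small usage sketch assuming the signature above (only ssh-agent is a real flag; the other is deliberately bogus):

// Usage sketch: split one configured string into recognized and unknown flags.
fn main() {
    let input = "ssh-agent, no-such-flag";
    let valid = parse_experimental_client_feature_flags(input, FeatureFlagFilter::ValidOnly);
    let invalid = parse_experimental_client_feature_flags(input, FeatureFlagFilter::InvalidOnly);
    assert!(valid.contains_key("ssh-agent"));      // in SUPPORTED_FEATURE_FLAGS
    assert!(invalid.contains_key("no-such-flag")); // reported on the diagnostics page
}
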
@@ -791,14 +818,18 @@ pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
        std::net::IpAddr::V4(ip) => {
            !(ip.octets()[0] == 0 // "This network"
                || ip.is_private()
                || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
                || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) // ip.is_shared()
                || ip.is_loopback()
                || ip.is_link_local()
                // addresses reserved for future protocols (`192.0.0.0/24`)
                ||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
                // .9 and .10 are documented as globally reachable so they're excluded
                || (
                    ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0
                    && ip.octets()[3] != 9 && ip.octets()[3] != 10
                )
                || ip.is_documentation()
                || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
                || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
                || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) // ip.is_reserved()
                || ip.is_broadcast())
        }
        std::net::IpAddr::V6(ip) => {
@@ -822,11 +853,17 @@ pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
                // AS112-v6 (`2001:4:112::/48`)
                || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
                // ORCHIDv2 (`2001:20::/28`)
                || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
                // Drone Remote ID Protocol Entity Tags (DETs) Prefix (`2001:30::/28`)
                || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x3F).contains(&b))
            ))
            || ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
            || ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
            || ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
            // 6to4 (`2002::/16`) – it's not explicitly documented as globally reachable,
            // IANA says N/A.
            || matches!(ip.segments(), [0x2002, _, _, _, _, _, _, _])
            || matches!(ip.segments(), [0x2001, 0xdb8, ..] | [0x3fff, 0..=0x0fff, ..]) // ip.is_documentation()
            // Segment Routing (SRv6) SIDs (`5f00::/16`)
            || matches!(ip.segments(), [0x5f00, ..])
            || ip.is_unique_local()
            || ip.is_unicast_link_local())
        }
    }
}

@@ -79,3 +79,4 @@ for name, domain_list in domain_lists.items():
# Write out the global domains JSON file.
with open(file=OUTPUT_FILE, mode='w', encoding='utf-8') as f:
    json.dump(global_domains, f, indent=2)
    f.write("\n")