Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2025-09-12 03:25:58 +03:00
Compare commits
250 Commits
3646f14042 813e889c97 8bcd0ab0c6 5725d297b4 a428f05e77
467ecfdc99 ed8091a994 56cad93e0f 3cf67e0b8d 5800aceb2d
729b563160 6b5618a5fc 2aa72eb240 c8655c4f89 daaa03d1b3
9e5b94924f f21089900e 0c0e632bc9 a13a5bd1d8 3b34b429f3
97ffd17789 10c5476d31 d3626eba2a de157b2654 337cbfaf22
f88b6d961e 0426051541 4556f668de da8225a3bd f10e6b6ac2
7ec00d3850 8f8d7418ed af6d17b701 61183d001c 024d12db08
dc7951efaf 06e14fea55 0f656b4889 6fa1dc50be 2bb41367bc
20d8886bfa 59ef82b740 fc543154c0 569b464157 adf83c698d
8fcbc58ee2 2dcbb2be59 7026e004e1 a3084feaee e7d36de784
54cc47b14e fac44888cd 9f056523c9 0af1ef387d f95f40be15
5c859e2e6c 03ff5e6ece 52d696aa74 a4e80712dd a947e434f0
2eb4f290a5 8ae799a771 9a5f3a5015 1ca0d6e245 7f69eebeb1
32bd9b83a3 477d60de49 1ba8275dcb a0a4994250 32dfa41970
f92efda0f0 3b0f643e9d 5bcee24f88 9e3d7ea44c 8cc6dac893
b7c4316c77 0c295d5e6e bc49d1f90d 6f6d9dee83 cef5dd4a46
79061c0eb5 6e2c3fc1cc e301fe137f af69c83db2 53fa8da5b1
c58aac585b 8c1117fcbf a6dd4f1206 5af1799991 a20a641de3
8abd38573b 78abdf0e9d dc031d8d86 de6330b09d 68bcc7a4b8
c04a1352cb 5d1c11ceba a2aa7c9bc2 b3a351ccb2 679bc7a59b
a72d0b518f 6741b25907 24b5784f02 eb9b481eba 64edc49392
0d1753ac74 a6558f5548 62dfeb80f2 26cd5d9643 e65fbbfc21
a2162f4d69 c9ed9aa733 9b20decdc1 adaefc8628 c6c45c4c49
95494083f2 686474f815 2c6bd8c9dc 9366e31452 96ff32fb2f
9342fa5744 50fc22966c 4fab4c74ff e38e1a5d5f cc91ac6cc0
2d8c8e18f7 b17e2da2cf d121cce0d2 0eba7a88fa 34ac16e9d7
906d9e2f1a 623d84aeb5 f8122cd2ca 9b7e86efc2 e7ccfbdd0e
acc1474394 c90b3031a6 aaffb2e007 e0e95e95e4 fa70b440d0
42acb2ebb6 174bea8d6e f68a57950b f747bf126b 1ca197fd46
63d05d929b ef5bf5d326 9d6e35d803 0cccdcab83 6607faa390
6fcf18ab51 d122c10573 ae9553ca1c ff919039c9 80eb15d46a
c36b870c54 b7cbca590c 606a1bbfcb 3e5369c8dd dd5e4cec73
a31a040abd f0125b95c1 072f2e24c2 36b5350f9b c7489c9fdf
3181e4e96e 2ee0d53c5f dfa629ecc7 92dc48b882 367e1ce289
7390f34355 c47d9f6593 5399ee8208 117045e6d3 912ad64555
00855ee31d c18a273b4a ca24a4adf1 a263aaa481 0a20ba0020
6541600af6 525979d5d9 7dd1959eba e266b39254 e935989fee
25c401f64d 18b72da657 e8e6c89927 fd5f657334 da9605f2d2
7030de32d5 b67c5b77be d30878c4ea 6be26f0a38 34a6bfaefa
1c8749eb4d 1198c36a2b 41e6c1a383 0042c3e4a7 724190f262
6867d23ca2 de26af0c2d 3f223a7514 23f5a62d61 81e2054f59
f9337effa5 2972904eb8 bdd918b4d4 88085fe17b 2020a302d0
ab2dd0f300 8e6fd4b4a1 988d24927e e945d16fcf f1c0aa4f83
68362d06b3 f65c0e2ac8 0f588ced03 b0f03bb49c 5063661028
7e66ab78ff 665e275dc5 a6da728cca 04e02d7f9f 7c739dd58e
05a552910c c990837066 57aec37507 0c5b4476ad 17141147a8
193c2fa860 6d01aaa80f ad60eaa0f3 d878face07 8bf8388cd6
b4db853bcb 5ee94c0ba9 f108349547 d25e1ab94b 79fee269ee
ffe362f856 04bb15a802 4d9d649db9 2897c24e83 5964dc95f0
613b2519ed 996b60e43d a6d09407b9 f2e9ddef4e ca417d3257
.env.template
@@ -259,9 +259,13 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com

-## Token for the admin interface, preferably use a long random string
-## One option is to use 'openssl rand -base64 48'
+## Token for the admin interface, preferably an Argon2 PHC string
+## Vaultwarden has a built-in generator by calling `vaultwarden hash`
+## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
 ## If not set, the admin panel is disabled
+## New Argon2 PHC string
+# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'
+## Old plain text string (Will generate warnings in favor of Argon2)
 # ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp

 ## Enable this to bypass the admin panel security. This option is only
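For reference, a PHC-format token can be produced with the built-in generator mentioned above, or with the standard argon2 CLI. The second command is a sketch: the password is a placeholder, and the -k/-t/-p flags are an assumption chosen to mirror the m=65540, t=3, p=4 parameters of the sample token.

$ vaultwarden hash
$ echo -n 'MySecretPassword' | argon2 "$(openssl rand -base64 32)" -e -id -k 65540 -t 3 -p 4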
@@ -298,9 +302,9 @@
 ## This setting applies globally to all users.
 # INCOMPLETE_2FA_TIME_LIMIT=3

-## Controls the PBKDF password iterations to apply on the server
-## The change only applies when the password is changed
-# PASSWORD_ITERATIONS=100000
+## Number of server-side password hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=350000

 ## Controls whether users can set password hints. This setting applies globally to all users.
 # PASSWORD_HINTS_ALLOWED=true
@@ -335,6 +339,9 @@
 ## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
 # ADMIN_RATELIMIT_MAX_BURST=3

+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
+
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
@@ -373,7 +380,7 @@
 # ROCKET_WORKERS=10
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}

-## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
+## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
@@ -385,6 +392,11 @@
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15

+# Whether to send mail via the `sendmail` command
+# USE_SENDMAIL=false
+# Which sendmail command to use. The one found in the $PATH is used if not specified.
+# SENDMAIL_COMMAND="/path/to/sendmail"
+
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
36 .github/workflows/build.yml (vendored)
@@ -9,6 +9,8 @@ on:
       - "Cargo.*"
       - "build.rs"
       - "rust-toolchain"
+      - "rustfmt.toml"
+      - "diesel.toml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -17,6 +19,8 @@ on:
       - "Cargo.*"
       - "build.rs"
       - "rust-toolchain"
+      - "rustfmt.toml"
+      - "diesel.toml"

 jobs:
   build:
@@ -26,42 +30,48 @@ jobs:
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
+      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git # Use the old git protocol until it is stable probably in 1.68 or 1.69. MSRV needs to be at this before removed.
     strategy:
       fail-fast: false
       matrix:
         channel:
           - "rust-toolchain" # The version defined in rust-toolchain
           - "msrv" # The supported MSRV
-        include:
-          - channel: "msrv"
-            version: "1.60.0"

     name: Build and Test ${{ matrix.channel }}

     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
       # End Checkout the repo

       # Install dependencies
       - name: "Install dependencies Ubuntu"
         run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies

       # Determine rust-toolchain version
       - name: Init Variables
         id: toolchain
         shell: bash
-        if: ${{ matrix.channel == 'rust-toolchain' }}
         run: |
-          RUST_TOOLCHAIN="$(cat rust-toolchain)"
+          if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
+            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+          elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
+            RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
+          else
+            RUST_TOOLCHAIN="${{ matrix.channel }}"
+          fi
           echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
       # End Determine rust-toolchain version

       # Uses the rust-toolchain file to determine version
+      # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
+        uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d # master @ 2023-03-21 - 06:36 GMT+1
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
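The grep branch above derives the MSRV from Cargo.toml's rust-version field instead of a hardcoded matrix entry. A hypothetical run against the Cargo.toml change later in this comparison would print:

$ grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml
1.66.1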
@@ -69,17 +79,17 @@ jobs:
       # End Uses the rust-toolchain file to determine version

-      # Install the MSRV channel to be used
+      # Install any other channel to be used, for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
+        uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d # master @ 2023-03-21 - 06:36 GMT+1
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
-          toolchain: ${{ matrix.version }}
+          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used

       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0
+      - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1
       # End Enable Rust Caching
@@ -184,7 +194,7 @@ jobs:

       # Upload artifact to Github Actions
       - name: "Upload artifact"
-        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1
+        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           name: vaultwarden
2 .github/workflows/hadolint.yml (vendored)
@@ -13,7 +13,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
       # End Checkout the repo
155 .github/workflows/release.yml (vendored)
@@ -48,11 +48,23 @@ jobs:
     ports:
       - 5000:5000
     env:
-      DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speedup building!
-      # DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
-      DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
+      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
+      # build performance and the ability to copy extended file attributes
+      # (e.g., for executable capabilities) across build phases.
+      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
+      # The *_REPO variables need to be configured as repository variables
+      # Append `/settings/variables/actions` to your repo url
+      # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
+      # Check for Docker hub credentials in secrets
+      HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
+      # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
+      # Check for Github credentials in secrets
+      HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
+      # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
+      # Check for Quay.io credentials in secrets
+      HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
     if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
@@ -61,17 +73,10 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
         with:
           fetch-depth: 0

-      # Login to Docker Hub
-      - name: Login to Docker Hub
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
       # Determine Docker Tag
       - name: Init Variables
         id: vars
@@ -85,34 +90,146 @@ jobs:
           fi
       # End Determine Docker Tag

-      - name: Build Debian based images
+      # Login to Docker Hub
+      - name: Login to Docker Hub
+        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+
+      # Login to GitHub Container Registry
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Login to Quay.io
+      - name: Login to Quay.io
+        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_USERNAME }}
+          password: ${{ secrets.QUAY_TOKEN }}
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+
+      # Debian
+
+      # Docker Hub
+      - name: Build Debian based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
         run: |
           ./hooks/build
-        if: ${{ matrix.base_image == 'debian' }}
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}

-      - name: Push Debian based images
+      - name: Push Debian based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
         run: |
           ./hooks/push
-        if: ${{ matrix.base_image == 'debian' }}
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}

-      - name: Build Alpine based images
+      # GitHub Container Registry
+      - name: Build Debian based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Push Debian based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Quay.io
+      - name: Build Debian based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Push Debian based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      # Alpine
+
+      # Docker Hub
+      - name: Build Alpine based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
         run: |
           ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' }}
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}

-      - name: Push Alpine based images
+      - name: Push Alpine based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
         run: |
           ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' }}
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+
+      # GitHub Container Registry
+      - name: Build Alpine based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Push Alpine based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Quay.io
+      - name: Build Alpine based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Push Alpine based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
.hadolint.yaml
@@ -3,5 +3,7 @@ ignored:
   - DL3008
   # disable explicit version for apk install
   - DL3018
+  # disable check for consecutive `RUN` instructions
+  - DL3059
 trustedRegistries:
   - docker.io
.pre-commit-config.yaml
@@ -1,16 +1,20 @@
 ---
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.3.0
+  rev: v4.4.0
   hooks:
   - id: check-yaml
   - id: check-json
   - id: check-toml
+  - id: mixed-line-ending
+    args: ["--fix=no"]
   - id: end-of-file-fixer
     exclude: "(.*js$|.*css$)"
   - id: check-case-conflict
   - id: check-merge-conflict
   - id: detect-private-key
+  - id: check-symlinks
+  - id: forbid-submodules
 - repo: local
   hooks:
   - id: fmt
@@ -36,5 +40,5 @@ repos:
     language: system
     args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
     types_or: [rust, file]
-    files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
+    files: (Cargo.toml|Cargo.lock|rust-toolchain|clippy.toml|.*\.rs$)
    pass_filenames: false
1462 Cargo.lock (generated): file diff suppressed because it is too large
94 Cargo.toml
@@ -3,12 +3,12 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.60.0"
+rust-version = "1.66.1"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
-license = "GPL-3.0-only"
+license = "AGPL-3.0-only"
 publish = false
 build = "build.rs"
@@ -41,43 +41,43 @@ syslog = "6.0.1" # Needs to be v4 until fern is updated
 [dependencies]
 # Logging
 log = "0.4.17"
-fern = { version = "0.6.1", features = ["syslog-6"] }
+fern = { version = "0.6.2", features = ["syslog-6"] }
 tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

 backtrace = "0.3.67" # Logging panics to logfile instead stderr only

 # A `dotenv` implementation for Rust
-dotenvy = { version = "0.15.6", default-features = false }
+dotenvy = { version = "0.15.7", default-features = false }

 # Lazy initialization
-once_cell = "1.16.0"
+once_cell = "1.17.1"

 # Numerical libraries
 num-traits = "0.2.15"
 num-derive = "0.3.3"

 # Web framework
-rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }
+rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }

 # WebSockets libraries
 tokio-tungstenite = "0.18.0"
 rmpv = "1.0.0" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
 dashmap = "5.4.0"

 # Async futures
-futures = "0.3.25"
-tokio = { version = "1.23.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+futures = "0.3.27"
+tokio = { version = "1.26.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.150", features = ["derive"] }
-serde_json = "1.0.89"
+serde = { version = "1.0.158", features = ["derive"] }
+serde_json = "1.0.94"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.0.2", features = ["chrono", "r2d2"] }
+diesel = { version = "2.0.3", features = ["chrono", "r2d2"] }
 diesel_migrations = "2.0.0"
 diesel_logger = { version = "0.2.0", optional = true }

-# Bundled SQLite
+# Bundled/Static SQLite
 libsqlite3-sys = { version = "0.25.2", features = ["bundled"], optional = true }

 # Crypto-related libraries
@@ -85,21 +85,21 @@ rand = { version = "0.8.5", features = ["small_rng"] }
 ring = "0.16.20"

 # UUID generation
-uuid = { version = "1.2.2", features = ["v4"] }
+uuid = { version = "1.3.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.23", features = ["clock", "serde"], default-features = false }
+chrono = { version = "0.4.24", features = ["clock", "serde"], default-features = false }
 chrono-tz = "0.8.1"
-time = "0.3.17"
+time = "0.3.20"

 # Job scheduler
-job_scheduler_ng = "2.0.3"
+job_scheduler_ng = "2.0.4"

 # Data encoding library Hex/Base32/Base64
 data-encoding = "2.3.3"

 # JWT library
-jsonwebtoken = "8.2.0"
+jsonwebtoken = "8.3.0"

 # TOTP library
 totp-lite = "2.0.0"
@@ -110,56 +110,68 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
 # WebAuthn libraries
 webauthn-rs = "0.3.2"

-# Handling of URL's for WebAuthn
+# Handling of URL's for WebAuthn and favicons
 url = "2.3.1"

 # Email libraries
-lettre = { version = "0.10.1", features = ["smtp-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.10.3", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
 percent-encoding = "2.2.0" # URL encoding library used for URL's in the emails
+email_address = "0.2.4"

-# Template library
-handlebars = { version = "4.3.5", features = ["dir_source"] }
+# HTML Template library
+handlebars = { version = "4.3.6", features = ["dir_source"] }

-# HTTP client
-reqwest = { version = "0.11.13", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
+# HTTP client (Used for favicons, version check, DUO and HIBP API)
+reqwest = { version = "0.11.15", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }

-# For favicon extraction from main website
+# Favicon extraction libraries
 html5gum = "0.5.2"
-regex = { version = "1.7.0", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.7.3", features = ["std", "perf", "unicode-perl"], default-features = false }
 data-url = "0.2.0"
-bytes = "1.3.0"
-cached = "0.40.0"
+bytes = "1.4.0"
+
+# Cache function results (Used for version check and favicon fetching)
+cached = "0.42.0"

 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.16.1"
+cookie = "0.16.2"
 cookie_store = "0.19.0"

-# Used by U2F, JWT and Postgres
-openssl = "0.10.44"
+# Used by U2F, JWT and PostgreSQL
+openssl = "0.10.48"

 # CLI argument parsing
 pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.10"
+paste = "1.0.12"
 governor = "0.5.1"

 # Check client versions for specific features.
-semver = "1.0.14"
+semver = "1.0.17"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.32", features = ["secure"], default-features = false, optional = true }
+mimalloc = { version = "0.1.34", features = ["secure"], default-features = false, optional = true }
 which = "4.4.0"

-[patch.crates-io]
-# Using a patched version of multer-rs (Used by Rocket) to fix attachment/send file uploads
-# Issue: https://github.com/dani-garcia/vaultwarden/issues/2644
-# Patch: https://github.com/BlackDex/multer-rs/commit/477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a
-multer = { git = "https://github.com/BlackDex/multer-rs", rev = "477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a" }
+# Argon2 library with support for the PHC format
+argon2 = "0.5.0"
+
+# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
+rpassword = "7.2.0"

 # Strip debuginfo from the release builds
 # Also enable thin LTO for some optimizations
 [profile.release]
 strip = "debuginfo"
 lto = "thin"

+# Always build argon2 using opt-level 3
+# This is a huge speed improvement during testing
+[profile.dev.package.argon2]
+opt-level = 3
+
+# A little bit of a speedup
+[profile.dev]
+split-debuginfo = "unpacked"
143 LICENSE.txt
@@ -1,5 +1,5 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
@@ -7,17 +7,15 @@

                             Preamble

-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.

   The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
+our General Public Licenses are intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
+software for all its users.

   When we speak of free software, we are referring to freedom, not
 price.  Our General Public Licenses are designed to make sure that you
@@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.

-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.

-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.

-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.

-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.

 The precise terms and conditions for copying, distribution and
 modification follow.
@@ -72,7 +60,7 @@ modification follow.

   0. Definitions.

-  "This License" refers to version 3 of the GNU General Public License.
+  "This License" refers to version 3 of the GNU Affero General Public License.

   "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.
@@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.

-  13. Use with the GNU Affero General Public License.
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.

   Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
+under version 3 of the GNU General Public License into a single
 combined work, and to convey the resulting work.  The terms of this
 License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.

   14. Revised Versions of this License.

   The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.

   Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
+Program specifies that a certain numbered version of the GNU Affero General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
+GNU Affero General Public License, you may choose any version ever published
 by the Free Software Foundation.

   If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
+versions of the GNU Affero General Public License can be used, that proxy's
 public statement of acceptance of a version permanently authorizes you
 to choose that version for the Program.
@@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found.
     Copyright (C) <year>  <name of author>

     This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
     (at your option) any later version.

     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
+    GNU Affero General Public License for more details.

-    You should have received a copy of the GNU General Public License
+    You should have received a copy of the GNU Affero General Public License
     along with this program.  If not, see <https://www.gnu.org/licenses/>.

 Also add information on how to contact you by electronic and paper mail.

-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.

   You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
+For more information on this, and how to apply and follow the GNU AGPL, see
 <https://www.gnu.org/licenses/>.
-
-The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
15 README.md
@@ -3,11 +3,13 @@
 📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.

 ---

 [](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml)
+[](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
 [](https://hub.docker.com/r/vaultwarden/server)
+[](https://quay.io/repository/vaultwarden/server)
 [](https://deps.rs/repo/github/dani-garcia/vaultwarden)
 [](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
+[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
 [](https://matrix.to/#/#vaultwarden:matrix.org)

 Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
@@ -23,12 +25,13 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
 Basically full implementation of Bitwarden API is provided including:

  * Organizations support
- * Attachments
+ * Attachments and Send
  * Vault API support
  * Serving the static files for Vault interface
  * Website icons API
  * Authenticator and U2F support
  * YubiKey and Duo support
+ * Emergency Access

 ## Installation
 Pull the docker image and mount a volume from the host for persistent storage:
@@ -39,7 +42,7 @@ docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
 ```
 This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.

-**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.

 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
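For instance, adapting the data path and host port from the docker run command above (both values are placeholders):

$ docker run -d --name vaultwarden -v /srv/vaultwarden-data/:/data/ -p 8080:80 vaultwarden/server:latest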
@@ -49,9 +52,9 @@ If you have an available domain name, you can get HTTPS certificates with [Let's
 See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

 ## Get in touch
-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/).

-If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though!

 If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
|
@@ -3,22 +3,22 @@
|
||||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||
|
||||
{% set build_stage_base_image = "rust:1.66-bullseye" %}
|
||||
{% set build_stage_base_image = "rust:1.68.1-bullseye" %}
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "amd64" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.66.0" %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.68.1" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.17" %}
|
||||
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.66.0" %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.68.1" %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.17" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
||||
{% elif "armv6" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.66.0" %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.68.1" %}
|
||||
{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.17" %}
|
||||
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
|
||||
{% elif "arm64" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.66.0" %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.68.1" %}
|
||||
{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.17" %}
|
||||
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
|
||||
{% endif %}
|
||||
@@ -50,7 +50,7 @@
 {% else %}
 {% set package_arch_target_param = "" %}
 {% endif %}
-{% if "buildx" in target_file %}
+{% if "buildkit" in target_file %}
 {% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
 {% else %}
 {% set mount_rust_cache = "" %}
@@ -59,8 +59,8 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "v2022.12.0" %}
-{% set vault_image_digest = "sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e" %}
+{% set vault_version = "v2023.3.0b" %}
+{% set vault_image_digest = "sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -83,8 +83,6 @@ FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
 ########################## BUILD IMAGE ##########################
 FROM {{ build_stage_base_image }} as build

-
-
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
     LANG=C.UTF-8 \
@@ -93,7 +91,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

-
 # Create CARGO_HOME folder and don't download rust docs
 RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -104,21 +101,20 @@ RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
 ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
 {% endif %}
 {% elif "arm" in target_file %}
-#
-# Install required build libs for {{ package_arch_name }} architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the {{ package_arch_name }} architecture
 RUN dpkg --add-architecture {{ package_arch_name }} \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev{{ package_arch_prefix }} \
-        gcc-{{ package_cross_compiler }} \
         libc6-dev{{ package_arch_prefix }} \
-        libpq5{{ package_arch_prefix }} \
-        libpq-dev{{ package_arch_prefix }} \
-        libmariadb3{{ package_arch_prefix }} \
+        libcap2-bin \
         libmariadb-dev{{ package_arch_prefix }} \
         libmariadb-dev-compat{{ package_arch_prefix }} \
+        gcc-{{ package_cross_compiler }} \
+        libmariadb3{{ package_arch_prefix }} \
+        libpq-dev{{ package_arch_prefix }} \
+        libpq5{{ package_arch_prefix }} \
+        libssl-dev{{ package_arch_prefix }} \
 #
 # Make sure cargo has the right target config
 && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
@@ -130,16 +126,14 @@ ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
     CROSS_COMPILE="1" \
     OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
     OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
-
 {% elif "amd64" in target_file %}
-# Install DB packages
+# Install build dependencies
 RUN apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libmariadb-dev{{ package_arch_prefix }} \
-        libpq-dev{{ package_arch_prefix }} \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+        libcap2-bin \
+        libmariadb-dev \
+        libpq-dev
 {% endif %}

 # Creates a dummy project used to grab dependencies
@@ -178,9 +172,20 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}

+{% if "buildkit" in target_file %}
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+{% if package_arch_target is defined %}
+RUN setcap cap_net_bind_service=+ep target/{{ package_arch_target }}/release/vaultwarden
+{% else %}
+RUN setcap cap_net_bind_service=+ep target/release/vaultwarden
+{% endif %}
+{% endif %}
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -195,7 +200,6 @@ ENV ROCKET_PROFILE="release" \

 {% if "amd64" not in target_file %}
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 {% endif %}
@@ -203,18 +207,18 @@ RUN [ "cross-build-start" ]
 RUN mkdir /data \
 {% if "alpine" in runtime_stage_base_image %}
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 {% else %}
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 {% endif %}
@@ -222,13 +226,11 @@ RUN mkdir /data \
 {% if "armv6" in target_file and "alpine" not in target_file %}
 # In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
 # This symlink was there in the buster images, and for some reason this is needed.
-# hadolint ignore=DL3059
 RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

 {% endif -%}

 {% if "amd64" not in target_file %}
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 {% endif %}
docker/Makefile
@@ -8,8 +8,8 @@ all: $(OBJECTS)
 %/Dockerfile.alpine: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.buildx: Dockerfile.j2 render_template
+%/Dockerfile.buildkit: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

-%/Dockerfile.buildx.alpine: Dockerfile.j2 render_template
+%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
 	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
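A hypothetical local run of the renamed targets from the repository root (the image tag is a placeholder; DOCKER_BUILDKIT=1 matches the BuildKit note in release.yml above):

$ make -C docker                       # re-render all Dockerfile variants from Dockerfile.j2
$ DOCKER_BUILDKIT=1 docker build -t vaultwarden:local -f docker/amd64/Dockerfile.buildkit .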
docker/amd64/Dockerfile
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,19 +37,17 @@ ENV DEBIAN_FRONTEND=noninteractive \
|
||||
CARGO_HOME="/root/.cargo" \
|
||||
USER="root"
|
||||
|
||||
|
||||
# Create CARGO_HOME folder and don't download rust docs
|
||||
RUN mkdir -pv "${CARGO_HOME}" \
|
||||
&& rustup set profile minimal
|
||||
|
||||
# Install DB packages
|
||||
# Install build dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
libcap2-bin \
|
||||
libmariadb-dev \
|
||||
libpq-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
libpq-dev
|
||||
|
||||
# Creates a dummy project used to grab dependencies
|
||||
RUN USER=root cargo new --bin /app
|
||||
@@ -81,9 +77,9 @@ RUN touch src/main.rs
|
||||
|
||||
# Builds again, this time it'll just be
|
||||
# your actual source files being built
|
||||
# hadolint ignore=DL3059
|
||||
RUN cargo build --features ${DB} --release
|
||||
|
||||
|
||||
######################## RUNTIME IMAGE ########################
|
||||
# Create a new stage with a minimal image
|
||||
# because we already have a binary built
|
||||
@@ -98,11 +94,11 @@ ENV ROCKET_PROFILE="release" \
|
||||
RUN mkdir /data \
|
||||
&& apt-get update && apt-get install -y \
|
||||
--no-install-recommends \
|
||||
openssl \
|
||||
ca-certificates \
|
||||
curl \
|
||||
libmariadb-dev-compat \
|
||||
libpq5 \
|
||||
openssl \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:x86_64-musl-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,9 +72,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -93,10 +90,10 @@ ENV ROCKET_PROFILE="release" \
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,19 +37,17 @@ ENV DEBIAN_FRONTEND=noninteractive \
    CARGO_HOME="/root/.cargo" \
    USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
-# Install DB packages
+# Install build dependencies
 RUN apt-get update \
     && apt-get install -y \
         --no-install-recommends \
+        libcap2-bin \
         libmariadb-dev \
-        libpq-dev \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+        libpq-dev
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -81,9 +77,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -98,11 +99,11 @@ ENV ROCKET_PROFILE="release" \
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
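The RUN --mount=type=cache flags in this variant are BuildKit-only syntax; the legacy builder rejects them, which is why they live in a separate Dockerfile.buildkit. A hedged sketch of building it (file path, tag, and build context are illustrative and assume the repository root):

    $ DOCKER_BUILDKIT=1 docker build \
          -f docker/amd64/Dockerfile.buildkit \
          -t vaultwarden:local .

Across repeated builds, the cargo registry and git caches persist in BuildKit cache mounts instead of being re-fetched into every image layer.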
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:x86_64-musl-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,9 +72,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/x86_64-unknown-linux-musl/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -93,10 +95,10 @@ ENV ROCKET_PROFILE="release" \
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
 
 VOLUME /data
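The setcap step exists because the runtime stage sets ROCKET_PORT=80, and binding a port below 1024 as a non-root user requires the cap_net_bind_service capability on the binary. A hedged way to confirm it took effect, run inside the build stage right after the setcap line (getcap comes from the same libcap tooling as setcap, which the RUN setcap step implies is present; output formatting varies with the libcap version):

    $ getcap target/x86_64-unknown-linux-musl/release/vaultwarden
    target/x86_64-unknown-linux-musl/release/vaultwarden cap_net_bind_service=ep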
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +37,24 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
 #
-# Install required build libs for arm64 architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the arm64 architecture
 RUN dpkg --add-architecture arm64 \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:arm64 \
-        gcc-aarch64-linux-gnu \
-        libc6-dev:arm64 \
-        libpq5:arm64 \
-        libpq-dev:arm64 \
-        libmariadb3:arm64 \
+        libcap2-bin \
         libmariadb-dev:arm64 \
         libmariadb-dev-compat:arm64 \
+        gcc-aarch64-linux-gnu \
+        libmariadb3:arm64 \
+        libpq-dev:arm64 \
+        libpq5:arm64 \
+        libssl-dev:arm64 \
     #
     # Make sure cargo has the right target config
     && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
@@ -71,7 +67,6 @@ ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
     OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
 
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,9 +96,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -113,22 +108,20 @@ ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
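For the cross-built images, the echo into "${CARGO_HOME}/config" (only the section header is visible in this hunk) plus the ENV CC_* block tell cargo and the cc crate which cross toolchain to use. A sketch of what the appended config plausibly contains for this target; the linker line is an assumption, since the lines after the echo are not part of this excerpt:

    [target.aarch64-unknown-linux-gnu]
    linker = "aarch64-linux-gnu-gcc"

With that in place, cargo build --target=aarch64-unknown-linux-gnu links with the arm64 cross gcc instead of the host toolchain.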
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
-#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:aarch64-musl-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,9 +72,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -89,18 +86,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs
 
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +37,24 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
 #
-# Install required build libs for arm64 architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the arm64 architecture
 RUN dpkg --add-architecture arm64 \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:arm64 \
-        gcc-aarch64-linux-gnu \
-        libc6-dev:arm64 \
-        libpq5:arm64 \
-        libpq-dev:arm64 \
-        libmariadb3:arm64 \
+        libcap2-bin \
         libmariadb-dev:arm64 \
         libmariadb-dev-compat:arm64 \
+        gcc-aarch64-linux-gnu \
+        libmariadb3:arm64 \
+        libpq-dev:arm64 \
+        libpq5:arm64 \
+        libssl-dev:arm64 \
     #
     # Make sure cargo has the right target config
     && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
@@ -71,7 +67,6 @@ ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
     OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
 
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,9 +96,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/aarch64-unknown-linux-gnu/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -113,22 +113,20 @@ ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:aarch64-musl-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,9 +72,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/aarch64-unknown-linux-musl/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -89,18 +91,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs
 
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +37,24 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
 #
-# Install required build libs for armel architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the armel architecture
 RUN dpkg --add-architecture armel \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:armel \
-        gcc-arm-linux-gnueabi \
-        libc6-dev:armel \
-        libpq5:armel \
-        libpq-dev:armel \
-        libmariadb3:armel \
+        libcap2-bin \
         libmariadb-dev:armel \
         libmariadb-dev-compat:armel \
+        gcc-arm-linux-gnueabi \
+        libmariadb3:armel \
+        libpq-dev:armel \
+        libpq5:armel \
+        libssl-dev:armel \
     #
     # Make sure cargo has the right target config
     && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
@@ -71,7 +67,6 @@ ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
     OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
 
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,9 +96,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -113,27 +108,24 @@ ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
 # In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
 # This symlink was there in the buster images, and for some reason this is needed.
-# hadolint ignore=DL3059
 RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:arm-musleabi-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -77,9 +74,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -91,18 +88,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs
 
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +37,24 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
 #
-# Install required build libs for armel architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the armel architecture
 RUN dpkg --add-architecture armel \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:armel \
-        gcc-arm-linux-gnueabi \
-        libc6-dev:armel \
-        libpq5:armel \
-        libpq-dev:armel \
-        libmariadb3:armel \
+        libcap2-bin \
         libmariadb-dev:armel \
         libmariadb-dev-compat:armel \
+        gcc-arm-linux-gnueabi \
+        libmariadb3:armel \
+        libpq-dev:armel \
+        libpq5:armel \
+        libssl-dev:armel \
     #
     # Make sure cargo has the right target config
     && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
@@ -71,7 +67,6 @@ ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
     OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
 
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,9 +96,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/arm-unknown-linux-gnueabi/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -113,27 +113,24 @@ ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
 # In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
 # This symlink was there in the buster images, and for some reason this is needed.
-# hadolint ignore=DL3059
 RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:arm-musleabi-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
    USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -77,9 +74,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/arm-unknown-linux-musleabi/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -91,18 +93,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs
 
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +37,24 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
 #
-# Install required build libs for armhf architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the armhf architecture
 RUN dpkg --add-architecture armhf \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:armhf \
-        gcc-arm-linux-gnueabihf \
-        libc6-dev:armhf \
-        libpq5:armhf \
-        libpq-dev:armhf \
-        libmariadb3:armhf \
+        libcap2-bin \
         libmariadb-dev:armhf \
         libmariadb-dev-compat:armhf \
+        gcc-arm-linux-gnueabihf \
+        libmariadb3:armhf \
+        libpq-dev:armhf \
+        libpq5:armhf \
+        libssl-dev:armhf \
     #
     # Make sure cargo has the right target config
     && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
@@ -71,7 +67,6 @@ ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
     OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,9 +96,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -113,22 +108,20 @@ ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:armv7-musleabihf-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,9 +72,9 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
 
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -89,18 +86,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs
 
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM rust:1.66-bullseye as build
-
-
+FROM rust:1.68.1-bullseye as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +37,24 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
 #
-# Install required build libs for armhf architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the armhf architecture
 RUN dpkg --add-architecture armhf \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:armhf \
-        gcc-arm-linux-gnueabihf \
-        libc6-dev:armhf \
-        libpq5:armhf \
-        libpq-dev:armhf \
-        libmariadb3:armhf \
+        libcap2-bin \
         libmariadb-dev:armhf \
         libmariadb-dev-compat:armhf \
+        gcc-arm-linux-gnueabihf \
+        libmariadb3:armhf \
+        libpq-dev:armhf \
+        libpq5:armhf \
+        libssl-dev:armhf \
     #
     # Make sure cargo has the right target config
     && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
@@ -71,7 +67,6 @@ ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
     OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,9 +96,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/armv7-unknown-linux-gnueabihf/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -113,22 +113,20 @@ ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -16,20 +16,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull vaultwarden/web-vault:v2023.3.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2023.3.0b
+#     [vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee
+#     [vaultwarden/web-vault:v2023.3.0b]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM vaultwarden/web-vault@sha256:aa6ba791911a815ea570ec2ddc59992481c6ba8fbb65eed4f7074b463430d3ee as vault
 
 ########################## BUILD IMAGE ##########################
-FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build
-
-
+FROM blackdex/rust-musl:armv7-musleabihf-stable-1.68.1 as build
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +37,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"
 
-
 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,9 +72,14 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
 
+# Add the `cap_net_bind_service` capability to allow listening on
+# privileged (< 1024) ports even when running as a non-root user.
+# This is only done if building with BuildKit; with the legacy
+# builder, the `COPY` instruction doesn't carry over capabilities.
+RUN setcap cap_net_bind_service=+ep target/armv7-unknown-linux-musleabihf/release/vaultwarden
+
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
@@ -89,18 +91,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs
 
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
 
 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata
 
-# hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
 
 VOLUME /data
@@ -1,3 +1,5 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
@@ -5,7 +7,9 @@ arches=(
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix

hooks/build
@@ -1,7 +1,8 @@
#!/bin/bash
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
@@ -23,10 +24,10 @@ LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="GPL-3.0-only"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
@@ -34,9 +35,9 @@ for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildx as template
# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildx_suffix=.buildx
    buildkit_suffix=.buildkit
fi

set -ex
@@ -45,6 +46,6 @@ for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/Dockerfile${buildx_suffix}${distro_suffix} \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

set -ex

hooks/push
@@ -1,5 +1,6 @@
#!/bin/bash
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled
@@ -41,7 +42,7 @@ LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in ${arches[@]}; do
for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
@@ -71,9 +72,9 @@ tags=("${DOCKER_REPO}:${DOCKER_TAG}")
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=(${DOCKER_REPO}:alpine)
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=(${DOCKER_REPO}:latest)
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

@@ -91,10 +92,10 @@ declare -A arch_to_platform=(
    [arm64]="linux/arm64"
)
platforms=()
for arch in ${arches[@]}; do
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platforms="$(join "," "${platforms[@]}")"
platform="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
@@ -104,46 +105,7 @@ docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platforms}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx

# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing vaultwarden tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
#
# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
# so this step can be removed once fixed versions are in wider distribution.
#
# Tags:
#
# testing => testing-arm32v6
# testing-alpine => <ignored>
# x.y.z => x.y.z-arm32v6, latest-arm32v6
# x.y.z-alpine => <ignored>
#
if [[ "${DOCKER_TAG}" != *alpine ]]; then
    image="${DOCKER_REPO}":"${DOCKER_TAG}"

    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"

    # Pull the armv6 image by digest, retag it, and repush it.
    docker pull "${DOCKER_REPO}"@"${digest}"
    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
    docker push "${image}"-arm32v6

    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
        docker push "${DOCKER_REPO}:latest"-arm32v6
    fi
fi
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN avatar_color VARCHAR(7);

migrations/mysql/2023-01-31-222222_add_argon2/up.sql (new file)
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_memory INTEGER DEFAULT NULL;

ALTER TABLE users
ADD COLUMN
client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN avatar_color TEXT;
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_memory INTEGER DEFAULT NULL;

ALTER TABLE users
ADD COLUMN
client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN avatar_color TEXT;

migrations/sqlite/2023-01-31-222222_add_argon2/up.sql (new file)
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_memory INTEGER DEFAULT NULL;

ALTER TABLE users
ADD COLUMN
client_kdf_parallelism INTEGER DEFAULT NULL;
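On the Rust side, these nullable columns surface as Option<i32> fields. A minimal sketch, assuming the field names used in the user-model changes elsewhere in this diff (values hypothetical):

// Sketch: nullable INTEGER columns map onto Option<i32>; None means the user
// is still on PBKDF2 and has no Argon2 parameters stored.
struct UserKdfFields {
    client_kdf_type: i32,
    client_kdf_iter: i32,
    client_kdf_memory: Option<i32>,
    client_kdf_parallelism: Option<i32>,
}

fn main() {
    // Hypothetical PBKDF2 user: both Argon2 parameters stay NULL/None.
    let user = UserKdfFields {
        client_kdf_type: 0, // assumed PBKDF2 discriminant
        client_kdf_iter: 600_000,
        client_kdf_memory: None,
        client_kdf_parallelism: None,
    };
    assert!(user.client_kdf_memory.is_none() && user.client_kdf_parallelism.is_none());
}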
@@ -1 +1 @@
1.66.0
1.68.1
@@ -1,7 +1,4 @@
# version = "Two"
edition = "2021"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
# struct_lit_single_line = false
# overflow_delimited_expr = true

src/api/admin.rs
@@ -13,7 +13,7 @@ use rocket::{
};

use crate::{
    api::{core::log_event, ApiResult, EmptyResult, JsonResult, NumberOrString},
    api::{core::log_event, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
@@ -33,6 +33,7 @@ pub fn routes() -> Vec<Route> {
    routes![
        get_users_json,
        get_user_json,
        get_user_by_mail_json,
        post_admin_login,
        admin_page,
        invite_user,
@@ -144,7 +145,6 @@ fn render_admin_login(msg: Option<&str>, redirect: Option<String>) -> ApiResult<
    let msg = msg.map(|msg| format!("Error: {msg}"));
    let json = json!({
        "page_content": "admin/login",
        "version": VERSION,
        "error": msg,
        "redirect": redirect,
        "urlpath": CONFIG.domain_path()
@@ -184,7 +184,7 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp

    let cookie = Cookie::build(COOKIE_NAME, jwt)
        .path(admin_path())
        .max_age(rocket::time::Duration::minutes(20))
        .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
        .same_site(SameSite::Strict)
        .http_only(true)
        .finish();
@@ -201,6 +201,19 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
fn _validate_token(token: &str) -> bool {
    match CONFIG.admin_token().as_ref() {
        None => false,
        Some(t) if t.starts_with("$argon2") => {
            use argon2::password_hash::PasswordVerifier;
            match argon2::password_hash::PasswordHash::new(t) {
                Ok(h) => {
                    // NOTE: hash params from `ADMIN_TOKEN` are used instead of what is configured in the `Argon2` instance.
                    argon2::Argon2::default().verify_password(token.trim().as_ref(), &h).is_ok()
                }
                Err(e) => {
                    error!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: {e}");
                    false
                }
            }
        }
        Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
    }
}
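To pair with the new `$argon2` branch above, here is a minimal sketch of generating a PHC string suitable for `ADMIN_TOKEN` and verifying it the same way the guard does, using the same `argon2` crate API the diff calls (the token value is hypothetical):

use argon2::{
    password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};
use rand_core::OsRng;

fn main() {
    // Hash a hypothetical admin token into a PHC string ("$argon2id$...").
    let salt = SaltString::generate(&mut OsRng);
    let phc = Argon2::default().hash_password(b"hypothetical-admin-token", &salt).unwrap().to_string();
    assert!(phc.starts_with("$argon2"));

    // Verify it exactly as _validate_token does: parse the PHC, then check the candidate.
    let parsed = PasswordHash::new(&phc).unwrap();
    assert!(Argon2::default().verify_password(b"hypothetical-admin-token", &parsed).is_ok());
}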
@@ -208,34 +221,16 @@ fn _validate_token(token: &str) -> bool {
#[derive(Serialize)]
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    page_data: Option<Value>,
    config: Value,
    can_backup: bool,
    logged_in: bool,
    urlpath: String,
}

impl AdminTemplateData {
    fn new() -> Self {
        Self {
            page_content: String::from("admin/settings"),
            version: VERSION,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            page_data: None,
        }
    }

    fn with_data(page_content: &str, page_data: Value) -> Self {
    fn new(page_content: &str, page_data: Value) -> Self {
        Self {
            page_content: String::from(page_content),
            version: VERSION,
            page_data: Some(page_data),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
        }
@@ -247,7 +242,11 @@ impl AdminTemplateData {
}

fn render_admin_page() -> ApiResult<Html<String>> {
    let text = AdminTemplateData::new().render()?;
    let settings_json = json!({
        "config": CONFIG.prepare_json(),
        "can_backup": *CAN_BACKUP,
    });
    let text = AdminTemplateData::new("admin/settings", settings_json).render()?;
    Ok(Html(text))
}

@@ -314,8 +313,9 @@ fn logout(cookies: &CookieJar<'_>) -> Redirect {

#[get("/users")]
async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
    let mut users_json = Vec::new();
    for u in User::get_all(&mut conn).await {
    let users = User::get_all(&mut conn).await;
    let mut users_json = Vec::with_capacity(users.len());
    for u in users {
        let mut usr = u.to_json(&mut conn).await;
        usr["UserEnabled"] = json!(u.enabled);
        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
@@ -327,8 +327,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {

#[get("/users/overview")]
async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let mut users_json = Vec::new();
    for u in User::get_all(&mut conn).await {
    let users = User::get_all(&mut conn).await;
    let mut users_json = Vec::with_capacity(users.len());
    for u in users {
        let mut usr = u.to_json(&mut conn).await;
        usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
        usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
@@ -342,10 +343,22 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
        users_json.push(usr);
    }

    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
    let text = AdminTemplateData::new("admin/users", json!(users_json)).render()?;
    Ok(Html(text))
}

#[get("/users/by-mail/<mail>")]
async fn get_user_by_mail_json(mail: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
    if let Some(u) = User::find_by_mail(&mail, &mut conn).await {
        let mut usr = u.to_json(&mut conn).await;
        usr["UserEnabled"] = json!(u.enabled);
        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        Ok(Json(usr))
    } else {
        err_code!("User doesn't exist", Status::NotFound.code);
    }
}

#[get("/users/<uuid>")]
async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
    let u = get_user_or_404(&uuid, &mut conn).await?;
@@ -356,7 +369,7 @@ async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> Js
}

#[post("/users/<uuid>/delete")]
async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: ClientIp) -> EmptyResult {
async fn delete_user(uuid: String, token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let user = get_user_or_404(&uuid, &mut conn).await?;

    // Get the user_org records before deleting the actual user
@@ -370,7 +383,7 @@ async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: Cli
            user_org.org_uuid,
            String::from(ACTING_ADMIN_USER),
            14, // Use UnknownBrowser type
            &ip.ip,
            &token.ip.ip,
            &mut conn,
        )
        .await;
@@ -380,22 +393,30 @@ async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: Cli
}

#[post("/users/<uuid>/deauth")]
async fn deauth_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
async fn deauth_user(uuid: String, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &mut conn).await?;
    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();

    user.save(&mut conn).await
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, None).await;

    save_result
}

#[post("/users/<uuid>/disable")]
async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let mut user = get_user_or_404(&uuid, &mut conn).await?;
    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();
    user.enabled = false;

    user.save(&mut conn).await
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, None).await;

    save_result
}

#[post("/users/<uuid>/enable")]
@@ -422,12 +443,7 @@ struct UserOrgTypeData {
}

#[post("/users/org_type", data = "<data>")]
async fn update_user_org_type(
    data: Json<UserOrgTypeData>,
    _token: AdminToken,
    mut conn: DbConn,
    ip: ClientIp,
) -> EmptyResult {
async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
    let data: UserOrgTypeData = data.into_inner();

    let mut user_to_edit =
@@ -442,7 +458,7 @@ async fn update_user_org_type(
    };

    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
        // Removing owner permmission, check that there is at least one other confirmed owner
        // Removing owner permission, check that there is at least one other confirmed owner
        if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
            err!("Can't change the type of the last owner")
        }
@@ -468,7 +484,7 @@ async fn update_user_org_type(
        data.org_uuid,
        String::from(ACTING_ADMIN_USER),
        14, // Use UnknownBrowser type
        &ip.ip,
        &token.ip.ip,
        &mut conn,
    )
    .await;
@@ -484,17 +500,21 @@ async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyRes

#[get("/organizations/overview")]
async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let mut organizations_json = Vec::new();
    for o in Organization::get_all(&mut conn).await {
    let organizations = Organization::get_all(&mut conn).await;
    let mut organizations_json = Vec::with_capacity(organizations.len());
    for o in organizations {
        let mut org = o.to_json();
        org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
        org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
        org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
        org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
        org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
        organizations_json.push(org);
    }

    let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
    let text = AdminTemplateData::new("admin/organizations", json!(organizations_json)).render()?;
    Ok(Html(text))
}

@@ -519,10 +539,20 @@ struct GitCommit {
    sha: String,
}

async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    let github_api = get_reqwest_client();
#[derive(Deserialize)]
struct TimeApi {
    year: u16,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    seconds: u8,
}

    Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    let json_api = get_reqwest_client();

    Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
}
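The rename from `get_github_api` to `get_json_api` reflects that the helper is now a generic JSON fetcher rather than GitHub-specific. A standalone sketch, assuming a plain `reqwest::Client` in place of `get_reqwest_client()` and reqwest built with its `json` feature:

use serde::de::DeserializeOwned;

// Generic JSON GET, mirroring the renamed helper above: send the request,
// turn HTTP error statuses into Err, then deserialize the body.
async fn get_json_api<T: DeserializeOwned>(client: &reqwest::Client, url: &str) -> Result<T, reqwest::Error> {
    client.get(url).send().await?.error_for_status()?.json::<T>().await
}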

async fn has_http_access() -> bool {
@@ -542,14 +572,13 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
    // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
    if has_http_access {
        (
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
            match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
                .await
            {
                Ok(r) => r.tag_name,
                _ => "-".to_string(),
            },
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
            {
            match get_json_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await {
                Ok(mut c) => {
                    c.sha.truncate(8);
                    c.sha
@@ -561,7 +590,7 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
            if running_within_docker {
                "-".to_string()
            } else {
                match get_github_api::<GitRelease>(
                match get_json_api::<GitRelease>(
                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
                )
                .await
@@ -576,6 +605,24 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
    }
}

async fn get_ntp_time(has_http_access: bool) -> String {
    if has_http_access {
        if let Ok(ntp_time) = get_json_api::<TimeApi>("https://www.timeapi.io/api/Time/current/zone?timeZone=UTC").await
        {
            return format!(
                "{year}-{month:02}-{day:02} {hour:02}:{minute:02}:{seconds:02} UTC",
                year = ntp_time.year,
                month = ntp_time.month,
                day = ntp_time.day,
                hour = ntp_time.hour,
                minute = ntp_time.minute,
                seconds = ntp_time.seconds
            );
        }
    }
    String::from("Unable to fetch NTP time.")
}
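The timeapi.io response is assumed to carry at least the six fields the new `TimeApi` struct names; serde ignores extra fields by default, so the deserialization reduces to this sketch (response body hypothetical):

use serde::Deserialize;

#[derive(Deserialize)]
struct TimeApi {
    year: u16,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    seconds: u8,
}

fn main() {
    // Hypothetical response body; unknown fields like "timeZone" are ignored.
    let body = r#"{"year":2023,"month":4,"day":1,"hour":12,"minute":30,"seconds":5,"timeZone":"UTC"}"#;
    let t: TimeApi = serde_json::from_str(body).unwrap();
    println!("{}-{:02}-{:02} {:02}:{:02}:{:02} UTC", t.year, t.month, t.day, t.hour, t.minute, t.seconds);
}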

#[get("/diagnostics")]
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult<Html<String>> {
    use chrono::prelude::*;
@@ -604,7 +651,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
    // Check if we are able to resolve DNS entries
    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
        Ok(Some(a)) => a.ip().to_string(),
        _ => "Could not resolve domain name.".to_string(),
        _ => "Unable to resolve domain name.".to_string(),
    };

    let (latest_release, latest_commit, latest_web_build) =
@@ -617,13 +664,14 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)

    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "current_release": VERSION,
        "latest_release": latest_release,
        "latest_commit": latest_commit,
        "web_vault_enabled": &CONFIG.web_vault_enabled(),
        "web_vault_version": web_vault_version.version,
        "web_vault_version": web_vault_version.version.trim_start_matches('v'),
        "latest_web_build": latest_web_build,
        "running_within_docker": running_within_docker,
        "docker_base_image": docker_base_image(),
        "docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
        "has_http_access": has_http_access,
        "ip_header_exists": &ip_header.0.is_some(),
        "ip_header_match": ip_header_name == CONFIG.ip_header(),
@@ -634,11 +682,14 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
        "db_version": get_sql_server_version(&mut conn).await,
        "admin_url": format!("{}/diagnostics", admin_url()),
        "overrides": &CONFIG.get_overrides().join(", "),
        "host_arch": std::env::consts::ARCH,
        "host_os": std::env::consts::OS,
        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
        "ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
    });

    let text = AdminTemplateData::with_data("admin/diagnostics", diagnostics_json).render()?;
    let text = AdminTemplateData::new("admin/diagnostics", diagnostics_json).render()?;
    Ok(Html(text))
}

@@ -668,15 +719,24 @@ async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
    }
}

pub struct AdminToken {}
pub struct AdminToken {
    ip: ClientIp,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminToken {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => err_handler!("Error getting Client IP"),
        };

        if CONFIG.disable_admin_token() {
            Outcome::Success(Self {})
            Outcome::Success(Self {
                ip,
            })
        } else {
            let cookies = request.cookies();

@@ -685,19 +745,16 @@ impl<'r> FromRequest<'r> for AdminToken {
                None => return Outcome::Failure((Status::Unauthorized, "Unauthorized")),
            };

            let ip = match ClientIp::from_request(request).await {
                Outcome::Success(ip) => ip.ip,
                _ => err_handler!("Error getting Client IP"),
            };

            if decode_admin(access_token).is_err() {
                // Remove admin cookie
                cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
                error!("Invalid or expired admin JWT. IP: {}.", ip);
                error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
                return Outcome::Failure((Status::Unauthorized, "Session expired"));
            }

            Outcome::Success(Self {})
            Outcome::Success(Self {
                ip,
            })
        }
    }
}
@@ -6,12 +6,17 @@ use crate::{
    api::{
        core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
    },
    auth::{decode_delete, decode_invite, decode_verify_email, ClientIp, Headers},
    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
};

use rocket::{
    http::Status,
    request::{FromRequest, Outcome, Request},
};

pub fn routes() -> Vec<rocket::Route> {
    routes![
        register,
@@ -39,6 +44,8 @@ pub fn routes() -> Vec<rocket::Route> {
        api_key,
        rotate_api_key,
        get_known_device,
        get_known_device_from_path,
        put_avatar,
    ]
}

@@ -48,6 +55,8 @@ pub struct RegisterData {
    Email: String,
    Kdf: Option<i32>,
    KdfIterations: Option<i32>,
    KdfMemory: Option<i32>,
    KdfParallelism: Option<i32>,
    Key: String,
    Keys: Option<KeysData>,
    MasterPasswordHash: String,
@@ -152,16 +161,18 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
    // Make sure we don't leave a lingering invitation.
    Invitation::take(&email, &mut conn).await;

    if let Some(client_kdf_iter) = data.KdfIterations {
        user.client_kdf_iter = client_kdf_iter;
    }

    if let Some(client_kdf_type) = data.Kdf {
        user.client_kdf_type = client_kdf_type;
    }

    user.set_password(&data.MasterPasswordHash, None);
    user.akey = data.Key;
    if let Some(client_kdf_iter) = data.KdfIterations {
        user.client_kdf_iter = client_kdf_iter;
    }

    user.client_kdf_memory = data.KdfMemory;
    user.client_kdf_parallelism = data.KdfParallelism;

    user.set_password(&data.MasterPasswordHash, Some(data.Key), true, None);
    user.password_hint = password_hint;

    // Add extra fields if present
@@ -228,6 +239,32 @@ async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, mut conn:
    Ok(Json(user.to_json(&mut conn).await))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct AvatarData {
    AvatarColor: Option<String>,
}

#[put("/accounts/avatar", data = "<data>")]
async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: AvatarData = data.into_inner().data;

    // It looks like it only supports the 6-digit hex color format.
    // If you try to add the short value it will not show that color.
    // Check and force 7 chars, including the #.
    if let Some(color) = &data.AvatarColor {
        if color.len() != 7 {
            err!("The field AvatarColor must be a HTML/Hex color code with a length of 7 characters")
        }
    }

    let mut user = headers.user;
    user.avatar_color = data.AvatarColor;

    user.save(&mut conn).await?;
    Ok(Json(user.to_json(&mut conn).await))
}
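To illustrate the avatar check above: the validation is purely a length test, so only full "#RRGGBB" values pass (example values hypothetical):

// Only 7-character "#RRGGBB" values pass the put_avatar check above.
fn avatar_color_ok(color: &str) -> bool {
    color.len() == 7
}

fn main() {
    assert!(avatar_color_ok("#ff7f00"));  // accepted
    assert!(!avatar_color_ok("#f70"));    // rejected: short CSS form
    assert!(!avatar_color_ok("ff7f00"));  // rejected: missing '#'
}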

#[get("/users/<uuid>/public-key")]
async fn get_public_keys(uuid: String, _headers: Headers, mut conn: DbConn) -> JsonResult {
    let user = match User::find_by_uuid(&uuid, &mut conn).await {
@@ -274,7 +311,7 @@ async fn post_password(
    data: JsonUpcase<ChangePassData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: ChangePassData = data.into_inner().data;
    let mut user = headers.user;
@@ -286,14 +323,24 @@ async fn post_password(
    user.password_hint = clean_password_hint(&data.MasterPasswordHint);
    enforce_password_hint_setting(&user.password_hint)?;

    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn)
        .await;

    user.set_password(
        &data.NewMasterPasswordHash,
        Some(data.Key),
        true,
        Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
    );
    user.akey = data.Key;
    user.save(&mut conn).await

    let save_result = user.save(&mut conn).await;

    // Prevent logging out the client where the user requested this endpoint from.
    // If you do log out the user it will cause issues at the client side.
    // Adding the device uuid will prevent this.
    nt.send_logout(&user, Some(headers.device.uuid)).await;

    save_result
}

#[derive(Deserialize)]
@@ -301,6 +348,8 @@ async fn post_password(
struct ChangeKdfData {
    Kdf: i32,
    KdfIterations: i32,
    KdfMemory: Option<i32>,
    KdfParallelism: Option<i32>,

    MasterPasswordHash: String,
    NewMasterPasswordHash: String,
@@ -308,7 +357,7 @@ struct ChangeKdfData {
}

#[post("/accounts/kdf", data = "<data>")]
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let data: ChangeKdfData = data.into_inner().data;
    let mut user = headers.user;

@@ -316,11 +365,39 @@ async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: D
        err!("Invalid password")
    }

    if data.Kdf == UserKdfType::Pbkdf2 as i32 && data.KdfIterations < 100_000 {
        err!("PBKDF2 KDF iterations must be at least 100000.")
    }

    if data.Kdf == UserKdfType::Argon2id as i32 {
        if data.KdfIterations < 1 {
            err!("Argon2 KDF iterations must be at least 1.")
        }
        if let Some(m) = data.KdfMemory {
            if !(15..=1024).contains(&m) {
                err!("Argon2 memory must be between 15 MB and 1024 MB.")
            }
            user.client_kdf_memory = data.KdfMemory;
        } else {
            err!("Argon2 memory parameter is required.")
        }
        if let Some(p) = data.KdfParallelism {
            if !(1..=16).contains(&p) {
                err!("Argon2 parallelism must be between 1 and 16.")
            }
            user.client_kdf_parallelism = data.KdfParallelism;
        } else {
            err!("Argon2 parallelism parameter is required.")
        }
    }
    user.client_kdf_iter = data.KdfIterations;
    user.client_kdf_type = data.Kdf;
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;
    user.save(&mut conn).await
    user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, Some(headers.device.uuid)).await;

    save_result
}
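Pulled out of the route for readability, the new Argon2id bounds amount to this standalone validation; a sketch with the limits copied from the checks above:

// Argon2id parameter bounds, as enforced by post_kdf above.
fn validate_argon2_params(iterations: i32, memory_mb: Option<i32>, parallelism: Option<i32>) -> Result<(), &'static str> {
    if iterations < 1 {
        return Err("Argon2 KDF iterations must be at least 1.");
    }
    match memory_mb {
        Some(m) if (15..=1024).contains(&m) => {}
        Some(_) => return Err("Argon2 memory must be between 15 MB and 1024 MB."),
        None => return Err("Argon2 memory parameter is required."),
    }
    match parallelism {
        Some(p) if (1..=16).contains(&p) => {}
        Some(_) => return Err("Argon2 parallelism must be between 1 and 16."),
        None => return Err("Argon2 parallelism parameter is required."),
    }
    Ok(())
}

fn main() {
    assert!(validate_argon2_params(3, Some(64), Some(4)).is_ok());
    assert!(validate_argon2_params(3, Some(8), Some(4)).is_err()); // too little memory
}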

#[derive(Deserialize)]
@@ -343,19 +420,19 @@ struct KeyData {
}

#[post("/accounts/key", data = "<data>")]
async fn post_rotatekey(
    data: JsonUpcase<KeyData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
    nt: Notify<'_>,
) -> EmptyResult {
async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let data: KeyData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password")
    }

    // Validate the import before continuing
    // Bitwarden does not process the import if there is one item invalid.
    // Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
    Cipher::validate_notes(&data.Ciphers)?;

    let user_uuid = &headers.user.uuid;

    // Update folder data
@@ -388,7 +465,8 @@ async fn post_rotatekey(

        // Prevent triggering cipher updates via WebSockets by setting UpdateType::None
        // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
        // We force the users to logout after the user has been saved to try and prevent these issues.
        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
            .await?
    }

@@ -399,11 +477,23 @@ async fn post_rotatekey(
    user.private_key = Some(data.PrivateKey);
    user.reset_security_stamp();

    user.save(&mut conn).await
    let save_result = user.save(&mut conn).await;

    // Prevent logging out the client where the user requested this endpoint from.
    // If you do log out the user it will cause issues at the client side.
    // Adding the device uuid will prevent this.
    nt.send_logout(&user, Some(headers.device.uuid)).await;

    save_result
}

#[post("/accounts/security-stamp", data = "<data>")]
async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn post_sstamp(
    data: JsonUpcase<PasswordData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: PasswordData = data.into_inner().data;
    let mut user = headers.user;

@@ -413,7 +503,11 @@ async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, mut conn:

    Device::delete_all_by_user(&user.uuid, &mut conn).await?;
    user.reset_security_stamp();
    user.save(&mut conn).await
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, None).await;

    save_result
}

#[derive(Deserialize)]
@@ -465,7 +559,12 @@ struct ChangeEmailData {
}

#[post("/accounts/email", data = "<data>")]
async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn post_email(
    data: JsonUpcase<ChangeEmailData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    let data: ChangeEmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -505,10 +604,13 @@ async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, mut con
    user.email_new = None;
    user.email_new_token = None;

    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;
    user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);

    user.save(&mut conn).await
    let save_result = user.save(&mut conn).await;

    nt.send_logout(&user, None).await;

    save_result
}

#[post("/accounts/verify-email")]
@@ -629,9 +731,9 @@ async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut co
}

#[get("/accounts/revision-date")]
fn revision_date(headers: Headers) -> String {
fn revision_date(headers: Headers) -> JsonResult {
    let revision_date = headers.user.updated_at.timestamp_millis();
    revision_date.to_string()
    Ok(Json(json!(revision_date)))
}

#[derive(Deserialize)]
@@ -674,7 +776,7 @@ async fn password_hint(data: JsonUpcase<PasswordHintData>, mut conn: DbConn) ->
        mail::send_password_hint(email, hint).await?;
        Ok(())
    } else if let Some(hint) = hint {
        err!(format!("Your password hint is: {}", hint));
        err!(format!("Your password hint is: {hint}"));
    } else {
        err!(NO_HINT);
    }
@@ -696,15 +798,22 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
    let data: PreloginData = data.into_inner().data;

    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => (user.client_kdf_type, user.client_kdf_iter),
        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
    let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => (user.client_kdf_type, user.client_kdf_iter, user.client_kdf_memory, user.client_kdf_parallelism),
        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None),
    };

    Json(json!({
    let mut result = json!({
        "Kdf": kdf_type,
        "KdfIterations": kdf_iter
    }))
        "KdfIterations": kdf_iter,
    });

    if kdf_type == UserKdfType::Argon2id as i32 {
        result["KdfMemory"] = Value::Number(kdf_mem.expect("Argon2 memory parameter is required.").into());
        result["KdfParallelism"] = Value::Number(kdf_para.expect("Argon2 parallelism parameter is required.").into());
    }

    Json(result)
}
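Shape-wise, the prelogin response only gains `KdfMemory` and `KdfParallelism` for Argon2id accounts. A sketch with hypothetical values, assuming Argon2id maps to kdf type 1:

use serde_json::{json, Value};

fn main() {
    // Kdf 1 = Argon2id here (assumed enum value); 0 would be PBKDF2 with no extra fields.
    let (kdf_type, kdf_iter, kdf_mem, kdf_para) = (1, 3, Some(64), Some(4));
    let mut result = json!({ "Kdf": kdf_type, "KdfIterations": kdf_iter });
    if kdf_type == 1 {
        result["KdfMemory"] = Value::Number(kdf_mem.unwrap().into());
        result["KdfParallelism"] = Value::Number(kdf_para.unwrap().into());
    }
    println!("{result}"); // {"Kdf":1,"KdfIterations":3,"KdfMemory":64,"KdfParallelism":4}
}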

// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
@@ -732,6 +841,8 @@ async fn _api_key(
    headers: Headers,
    mut conn: DbConn,
) -> JsonResult {
    use crate::util::format_date;

    let data: SecretVerificationRequest = data.into_inner().data;
    let mut user = headers.user;

@@ -746,6 +857,7 @@ async fn _api_key(

    Ok(Json(json!({
        "ApiKey": user.api_key,
        "RevisionDate": format_date(&user.updated_at),
        "Object": "apiKey",
    })))
}
@@ -760,15 +872,61 @@ async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: He
    _api_key(data, true, headers, conn).await
}

// This variant is deprecated: https://github.com/bitwarden/server/pull/2682
#[get("/devices/knowndevice/<email>/<uuid>")]
async fn get_known_device(email: String, uuid: String, mut conn: DbConn) -> String {
async fn get_known_device_from_path(email: String, uuid: String, mut conn: DbConn) -> JsonResult {
    // This endpoint doesn't have auth header
    let mut result = false;
    if let Some(user) = User::find_by_mail(&email, &mut conn).await {
        match Device::find_by_uuid_and_user(&uuid, &user.uuid, &mut conn).await {
            Some(_) => String::from("true"),
            _ => String::from("false"),
        }
    } else {
        String::from("false")
        result = Device::find_by_uuid_and_user(&uuid, &user.uuid, &mut conn).await.is_some();
    }
    Ok(Json(json!(result)))
}

#[get("/devices/knowndevice")]
async fn get_known_device(device: KnownDevice, conn: DbConn) -> JsonResult {
    get_known_device_from_path(device.email, device.uuid, conn).await
}

struct KnownDevice {
    email: String,
    uuid: String,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for KnownDevice {
    type Error = &'static str;

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
            let email_bytes = match data_encoding::BASE64URL.decode(email_b64.as_bytes()) {
                Ok(bytes) => bytes,
                Err(_) => {
                    return Outcome::Failure((
                        Status::BadRequest,
                        "X-Request-Email value failed to decode as base64url",
                    ));
                }
            };
            match String::from_utf8(email_bytes) {
                Ok(email) => email,
                Err(_) => {
                    return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
                }
            }
        } else {
            return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
        };

        let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
            uuid.to_string()
        } else {
            return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
        };

        Outcome::Success(KnownDevice {
            email,
            uuid,
        })
    }
}
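Client-side, the two headers this guard consumes could be formed as follows; a sketch using the same `data_encoding::BASE64URL` constant the decode path uses (email and device uuid hypothetical):

use data_encoding::BASE64URL;

fn main() {
    let email = "user@example.com"; // hypothetical
    let encoded = BASE64URL.encode(email.as_bytes());
    println!("X-Request-Email: {encoded}");
    println!("X-Device-Identifier: 6eb886e3-37e4-4281-b260-a7cb25ad86e9"); // any device uuid

    // Round-trip, mirroring the decode in from_request above.
    let decoded = BASE64URL.decode(encoded.as_bytes()).unwrap();
    assert_eq!(String::from_utf8(decoded).unwrap(), email);
}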
[File diff suppressed because it is too large]
@@ -123,7 +123,9 @@ async fn post_emergency_access(

    emergency_access.atype = new_type;
    emergency_access.wait_time_days = data.WaitTimeDays;
    emergency_access.key_encrypted = data.KeyEncrypted;
    if data.KeyEncrypted.is_some() {
        emergency_access.key_encrypted = data.KeyEncrypted;
    }

    emergency_access.save(&mut conn).await?;
    Ok(Json(emergency_access.to_json()))
@@ -584,13 +586,20 @@ async fn view_emergency_access(emer_id: String, headers: Headers, mut conn: DbCo
    }

    let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await;
    let cipher_sync_data =
        CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, CipherSyncType::User, &mut conn).await;
    let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, CipherSyncType::User, &mut conn).await;

    let mut ciphers_json = Vec::new();
    let mut ciphers_json = Vec::with_capacity(ciphers.len());
    for c in ciphers {
        ciphers_json
            .push(c.to_json(&headers.host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &mut conn).await);
        ciphers_json.push(
            c.to_json(
                &headers.host,
                &emergency_access.grantor_uuid,
                Some(&cipher_sync_data),
                CipherSyncType::User,
                &mut conn,
            )
            .await,
        );
    }

    Ok(Json(json!({
@@ -619,12 +628,22 @@ async fn takeover_emergency_access(emer_id: String, headers: Headers, mut conn:
        None => err!("Grantor user not found."),
    };

    Ok(Json(json!({
        "Kdf": grantor_user.client_kdf_type,
        "KdfIterations": grantor_user.client_kdf_iter,
        "KeyEncrypted": &emergency_access.key_encrypted,
        "Object": "emergencyAccessTakeover",
    })))
    let mut result = json!({
        "Kdf": grantor_user.client_kdf_type,
        "KdfIterations": grantor_user.client_kdf_iter,
        "KeyEncrypted": &emergency_access.key_encrypted,
        "Object": "emergencyAccessTakeover",
    });

    if grantor_user.client_kdf_type == UserKdfType::Argon2id as i32 {
        result["KdfMemory"] =
            Value::Number(grantor_user.client_kdf_memory.expect("Argon2 memory parameter is required.").into());
        result["KdfParallelism"] = Value::Number(
            grantor_user.client_kdf_parallelism.expect("Argon2 parallelism parameter is required.").into(),
        );
    }

    Ok(Json(result))
}

#[derive(Deserialize)]
@@ -645,7 +664,7 @@ async fn password_emergency_access(

    let data: EmergencyAccessPasswordData = data.into_inner().data;
    let new_master_password_hash = &data.NewMasterPasswordHash;
    let key = data.Key;
    //let key = &data.Key;

    let requesting_user = headers.user;
    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
@@ -663,8 +682,7 @@ async fn password_emergency_access(
    };

    // change grantor_user password
    grantor_user.set_password(new_master_password_hash, None);
    grantor_user.akey = key;
    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
    grantor_user.save(&mut conn).await?;

    // Disable TwoFactor providers since they will otherwise block logins
@@ -6,7 +6,7 @@ use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcaseVec},
    auth::{AdminHeaders, ClientIp, Headers},
    auth::{AdminHeaders, Headers},
    db::{
        models::{Cipher, Event, UserOrganization},
        DbConn, DbPool,
@@ -161,12 +161,7 @@ struct EventCollection {
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
#[post("/collect", format = "application/json", data = "<data>")]
async fn post_events_collect(
    data: JsonUpcaseVec<EventCollection>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> EmptyResult {
async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.org_events_enabled() {
        return Ok(());
    }
@@ -180,7 +175,7 @@ async fn post_events_collect(
                    &headers.user.uuid,
                    headers.device.atype,
                    Some(event_date),
                    &ip.ip,
                    &headers.ip.ip,
                    &mut conn,
                )
                .await;
@@ -194,7 +189,7 @@ async fn post_events_collect(
                    &headers.user.uuid,
                    headers.device.atype,
                    Some(event_date),
                    &ip.ip,
                    &headers.ip.ip,
                    &mut conn,
                )
                .await;
@@ -211,7 +206,7 @@ async fn post_events_collect(
                    &headers.user.uuid,
                    headers.device.atype,
                    Some(event_date),
                    &ip.ip,
                    &headers.ip.ip,
                    &mut conn,
                )
                .await;
@@ -50,7 +50,7 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
    let mut folder = Folder::new(headers.user.uuid, data.Name);

    folder.save(&mut conn).await?;
    nt.send_folder_update(UpdateType::FolderCreate, &folder).await;
    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid).await;

    Ok(Json(folder.to_json()))
}
@@ -88,7 +88,7 @@ async fn put_folder(
    folder.name = data.Name;

    folder.save(&mut conn).await?;
    nt.send_folder_update(UpdateType::FolderUpdate, &folder).await;
    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid).await;

    Ok(Json(folder.to_json()))
}
@@ -112,6 +112,6 @@ async fn delete_folder(uuid: String, headers: Headers, mut conn: DbConn, nt: Not
    // Delete the actual folder entry
    folder.delete(&mut conn).await?;

    nt.send_folder_update(UpdateType::FolderDelete, &folder).await;
    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device.uuid).await;
    Ok(())
}
@@ -7,8 +7,7 @@ mod organizations;
mod sends;
pub mod two_factor;

pub use ciphers::purge_trashed_ciphers;
pub use ciphers::{CipherSyncData, CipherSyncType};
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
pub use events::{event_cleanup_job, log_event, log_user_event};
pub use sends::purge_sends;
@@ -47,13 +46,11 @@ pub fn events_routes() -> Vec<Route> {
//
// Move this somewhere else
//
use rocket::serde::json::Json;
use rocket::Catcher;
use rocket::Route;
use rocket::{serde::json::Json, Catcher, Route};
use serde_json::Value;

use crate::{
    api::{JsonResult, JsonUpcase},
    api::{JsonResult, JsonUpcase, Notify, UpdateType},
    auth::Headers,
    db::DbConn,
    error::Error,
@@ -138,7 +135,12 @@ struct EquivDomainData {
}

#[post("/settings/domains", data = "<data>")]
async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn post_eq_domains(
    data: JsonUpcase<EquivDomainData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    let data: EquivDomainData = data.into_inner().data;

    let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -152,19 +154,25 @@ async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, mu

    user.save(&mut conn).await?;

    nt.send_user_update(UpdateType::SyncSettings, &user).await;

    Ok(Json(json!({})))
}

#[put("/settings/domains", data = "<data>")]
async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
    post_eq_domains(data, headers, conn).await
async fn put_eq_domains(
    data: JsonUpcase<EquivDomainData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    post_eq_domains(data, headers, conn, nt).await
}

#[get("/hibp/breach?<username>")]
async fn hibp_breach(username: String) -> JsonResult {
    let url = format!(
        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
        username
        "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
    );

    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
@@ -186,7 +194,7 @@ async fn hibp_breach(username: String) -> JsonResult {
            "Domain": "haveibeenpwned.com",
            "BreachDate": "2019-08-18T00:00:00Z",
            "AddedDate": "2019-08-18T00:00:00Z",
            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
            "LogoPath": "vw_static/hibp.png",
            "PwnCount": 0,
            "DataClasses": [
@@ -229,6 +237,7 @@ fn config() -> Json<Value> {
            "notifications": format!("{domain}/notifications"),
            "sso": "",
        },
        "object": "config",
    }))
}
[File diff suppressed because it is too large]
@@ -228,19 +228,6 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
        err!("Send content is not a file");
    }

    // There is a bug regarding uploading attachments/sends using the Mobile clients
    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
    // On the vaultwarden side this is temporarily fixed by using a custom multer library
    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
    if let TempFile::Buffered {
        content: _,
    } = &data
    {
        err!("Error reading send file data. Please try an other client.");
    }

    let size = data.len();
    if size > size_limit {
        err!("Attachment storage limit exceeded with this file");
@@ -339,19 +326,6 @@ async fn post_send_file_v2_data(

    let mut data = data.into_inner();

    // There is a bug regarding uploading attachments/sends using the Mobile clients
    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
    // On the vaultwarden side this is temporarily fixed by using a custom multer library
    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
    if let TempFile::Buffered {
        content: _,
    } = &data.data
    {
        err!("Error reading attachment data. Please try an other client.");
    }

    if let Some(send) = Send::find_by_uuid(&send_uuid, &mut conn).await {
        let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send_uuid);
        let file_path = folder_path.join(&file_id);
@@ -381,6 +355,7 @@ async fn post_access(
    data: JsonUpcase<SendAccessData>,
    mut conn: DbConn,
    ip: ClientIp,
    nt: Notify<'_>,
) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &mut conn).await {
        Some(s) => s,
@@ -422,6 +397,8 @@ async fn post_access(

    send.save(&mut conn).await?;

    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json_access(&mut conn).await))
}

@@ -432,6 +409,7 @@ async fn post_access_file(
    data: JsonUpcase<SendAccessData>,
    host: Host,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &mut conn).await {
        Some(s) => s,
@@ -470,6 +448,8 @@ async fn post_access_file(

    send.save(&mut conn).await?;

    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;

    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
    let token = crate::auth::encode_jwt(&token_claims);
    Ok(Json(json!({
@@ -482,7 +462,7 @@ async fn post_access_file(
#[get("/sends/<send_id>/<file_id>?<t>")]
async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
    if let Ok(claims) = crate::auth::decode_send(&t) {
        if claims.sub == format!("{}/{}", send_id, file_id) {
        if claims.sub == format!("{send_id}/{file_id}") {
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
        }
    }
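Note on the TempFile checks above: Rocket's `TempFile` is either `Buffered` (held in memory) or `File` (spilled to disk), and only the file-backed variant is accepted here because of the mobile-client bug referenced in the comments. A minimal standalone sketch of the same guard, assuming Rocket 0.5's rocket::fs::TempFile and a simplified error type:

use rocket::fs::TempFile;

// Reject in-memory uploads, mirroring the post_send_file check above, and
// persist file-backed ones to their destination.
async fn persist_upload(mut file: TempFile<'_>, dest: &str) -> Result<(), &'static str> {
    if let TempFile::Buffered { .. } = &file {
        // Buffered contents may have been altered by the client-side bug the
        // comments above reference, so they are rejected outright.
        return Err("upload was buffered in memory; retry with another client");
    }
    file.persist_to(dest).await.map_err(|_| "could not persist upload")
}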
@@ -57,7 +57,6 @@ struct EnableAuthenticatorData {
async fn activate_authenticator(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    mut conn: DbConn,
) -> JsonResult {
    let data: EnableAuthenticatorData = data.into_inner().data;
@@ -82,11 +81,11 @@ async fn activate_authenticator(
    }

    // Validate the token provided with the key, and save new twofactor
    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &mut conn).await?;
    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &mut conn).await?;

    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Enabled": true,
@@ -99,10 +98,9 @@ async fn activate_authenticator(
async fn activate_authenticator_put(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    activate_authenticator(data, headers, ip, conn).await
    activate_authenticator(data, headers, conn).await
}

pub async fn validate_totp_code_str(
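For reference, the validate_totp_code call above checks the submitted token against RFC 6238 TOTP values computed from the shared key. A hedged sketch of that comparison, assuming the totp-lite crate and an illustrative ±1-step drift window:

use std::time::{SystemTime, UNIX_EPOCH};
use totp_lite::{totp_custom, Sha1};

// Compare a 6-digit token against the current 30-second step and its
// neighbours, to tolerate small client/server clock drift.
fn totp_matches(secret: &[u8], token: &str) -> bool {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    (-1i64..=1).any(|offset| {
        let step_time = now.wrapping_add_signed(offset * 30);
        totp_custom::<Sha1>(30, 6, secret, step_time) == token
    })
}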
@@ -8,7 +8,7 @@ use crate::{
        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
        PasswordData,
    },
    auth::{ClientIp, Headers},
    auth::Headers,
    crypto,
    db::{
        models::{EventType, TwoFactor, TwoFactorType, User},
@@ -155,7 +155,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
}

#[post("/two-factor/duo", data = "<data>")]
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: EnableDuoData = data.into_inner().data;
    let mut user = headers.user;

@@ -178,7 +178,7 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con

    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Enabled": true,
@@ -190,8 +190,8 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
}

#[put("/two-factor/duo", data = "<data>")]
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn, ip: ClientIp) -> JsonResult {
    activate_duo(data, headers, conn, ip).await
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_duo(data, headers, conn).await
}

async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
@@ -270,11 +270,11 @@ pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult
    let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
    let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);

    Ok((format!("{}:{}", duo_sign, app_sign), host))
    Ok((format!("{duo_sign}:{app_sign}"), host))
}

fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
    let val = format!("{}|{}|{}", email, ikey, expire);
    let val = format!("{email}|{ikey}|{expire}");
    let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));

    format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
@@ -327,7 +327,7 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
    let u_b64 = split[1];
    let u_sig = split[2];

    let sig = crypto::hmac_sign(key, &format!("{}|{}", u_prefix, u_b64));
    let sig = crypto::hmac_sign(key, &format!("{u_prefix}|{u_b64}"));

    if !crypto::ct_eq(crypto::hmac_sign(key, &sig), crypto::hmac_sign(key, u_sig)) {
        err!("Duo signatures don't match")
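One detail worth calling out in parse_duo_values: both signatures are HMAC'd again before the constant-time `ct_eq`, so the comparison always operates on fixed-length MACs rather than attacker-controlled strings. A minimal sketch of that double-HMAC comparison, assuming the ring crate (vaultwarden's crypto module wraps ring's HMAC):

use ring::{constant_time, hmac};

// Re-MAC both the locally computed signature and the user-supplied one, then
// compare the fixed-length tags in constant time, as parse_duo_values does.
fn signatures_match(key: &[u8], expected_sig: &str, user_sig: &str) -> bool {
    let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, key);
    let a = hmac::sign(&key, expected_sig.as_bytes());
    let b = hmac::sign(&key, user_sig.as_bytes());
    constant_time::verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
}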
@@ -7,7 +7,7 @@ use crate::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, PasswordData,
    },
    auth::{ClientIp, Headers},
    auth::Headers,
    crypto,
    db::{
        models::{EventType, TwoFactor, TwoFactorType},
@@ -90,7 +90,7 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
            let twofactor_data = EmailTokenData::from_json(&x.data)?;
            (true, json!(twofactor_data.email))
        }
        _ => (false, json!(null)),
        _ => (false, serde_json::value::Value::Null),
    };

    Ok(Json(json!({
@@ -150,7 +150,7 @@ struct EmailData {

/// Verify email belongs to user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: EmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -180,7 +180,7 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn,

    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Email": email_data.email,
@@ -304,7 +304,7 @@ pub fn obscure_email(email: &str) -> String {
        _ => {
            let stars = "*".repeat(name_size - 2);
            name.truncate(2);
            format!("{}{}", name, stars)
            format!("{name}{stars}")
        }
    };
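For orientation, the obscure_email arm shown above keeps the first two characters of the local part and pads the rest with stars; the one- and two-character cases live in arms elided from this hunk. A hedged usage example under that reading, assuming the elided code re-attaches the domain unchanged:

// Hypothetical expected output for a typical address:
assert_eq!(obscure_email("johndoe@example.com"), "jo*****@example.com");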
@@ -6,7 +6,7 @@ use serde_json::Value;

use crate::{
    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::{ClientHeaders, ClientIp, Headers},
    auth::{ClientHeaders, Headers},
    crypto,
    db::{models::*, DbConn, DbPool},
    mail, CONFIG,
@@ -73,12 +73,7 @@ struct RecoverTwoFactor {
}

#[post("/two-factor/recover", data = "<data>")]
async fn recover(
    data: JsonUpcase<RecoverTwoFactor>,
    client_headers: ClientHeaders,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
    let data: RecoverTwoFactor = data.into_inner().data;

    use crate::db::models::User;
@@ -102,12 +97,19 @@ async fn recover(
    // Remove all twofactors from the user
    TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;

    log_user_event(EventType::UserRecovered2fa as i32, &user.uuid, client_headers.device_type, &ip.ip, &mut conn).await;
    log_user_event(
        EventType::UserRecovered2fa as i32,
        &user.uuid,
        client_headers.device_type,
        &client_headers.ip.ip,
        &mut conn,
    )
    .await;

    // Remove the recovery code, not needed without twofactors
    user.totp_recover = None;
    user.save(&mut conn).await?;
    Ok(Json(json!({})))
    Ok(Json(Value::Object(serde_json::Map::new())))
}

async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
@@ -126,12 +128,7 @@ struct DisableTwoFactorData {
}

#[post("/two-factor/disable", data = "<data>")]
async fn disable_twofactor(
    data: JsonUpcase<DisableTwoFactorData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: DisableTwoFactorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let user = headers.user;
@@ -144,7 +141,8 @@ async fn disable_twofactor(

    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
        twofactor.delete(&mut conn).await?;
        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn)
            .await;
    }

    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
@@ -173,13 +171,8 @@ async fn disable_twofactor(
}

#[put("/two-factor/disable", data = "<data>")]
async fn disable_twofactor_put(
    data: JsonUpcase<DisableTwoFactorData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    disable_twofactor(data, headers, conn, ip).await
async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
    disable_twofactor(data, headers, conn).await
}

pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
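The swap from `json!({})` to `Value::Object(serde_json::Map::new())` above is behaviour-preserving; both values are the empty JSON object, the explicit form merely avoids the macro. A quick check, assuming serde_json:

use serde_json::{json, Map, Value};

// Both expressions are the same empty object and serialize to "{}".
fn main() {
    assert_eq!(json!({}), Value::Object(Map::new()));
    assert_eq!(Value::Object(Map::new()).to_string(), "{}");
}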
@@ -9,7 +9,7 @@ use crate::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
    },
    auth::{ClientIp, Headers},
    auth::Headers,
    db::{
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
@@ -242,12 +242,7 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
}

#[post("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn(
    data: JsonUpcase<EnableWebauthnData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: EnableWebauthnData = data.into_inner().data;
    let mut user = headers.user;

@@ -286,7 +281,7 @@ async fn activate_webauthn(
    .await?;
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

    let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
    Ok(Json(json!({
@@ -297,13 +292,8 @@ async fn activate_webauthn(
}

#[put("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn_put(
    data: JsonUpcase<EnableWebauthnData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    activate_webauthn(data, headers, conn, ip).await
async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_webauthn(data, headers, conn).await
}

#[derive(Deserialize, Debug)]
@@ -8,7 +8,7 @@ use crate::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, PasswordData,
    },
    auth::{ClientIp, Headers},
    auth::Headers,
    db::{
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
@@ -47,7 +47,7 @@ fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
}

fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
    let mut result = json!({});
    let mut result = Value::Object(serde_json::Map::new());

    for (i, key) in yubikeys.into_iter().enumerate() {
        result[format!("Key{}", i + 1)] = Value::String(key);
@@ -118,12 +118,7 @@ async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut
}

#[post("/two-factor/yubikey", data = "<data>")]
async fn activate_yubikey(
    data: JsonUpcase<EnableYubikeyData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: EnableYubikeyData = data.into_inner().data;
    let mut user = headers.user;

@@ -169,7 +164,7 @@ async fn activate_yubikey(

    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

    let mut result = jsonify_yubikeys(yubikey_metadata.Keys);

@@ -181,13 +176,8 @@ async fn activate_yubikey(
}

#[put("/two-factor/yubikey", data = "<data>")]
async fn activate_yubikey_put(
    data: JsonUpcase<EnableYubikeyData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    activate_yubikey(data, headers, conn, ip).await
async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_yubikey(data, headers, conn).await
}

pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
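jsonify_yubikeys above builds its object through index assignment on a `serde_json::Value`, producing keys Key1, Key2, and so on. A hedged usage sketch (the key IDs are illustrative):

// Expected shape of the output for two registered keys, based on the loop
// shown above:
let keys = vec!["ccccccabcdef".to_string(), "ccccccfedcba".to_string()];
let result = jsonify_yubikeys(keys);
assert_eq!(result["Key1"], "ccccccabcdef");
assert_eq!(result["Key2"], "ccccccfedcba");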
@@ -79,7 +79,7 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
        return None;
    }

    if is_domain_blacklisted(domain).await {
    if check_domain_blacklist_reason(domain).await.is_some() {
        return None;
    }

@@ -130,7 +130,7 @@ fn is_valid_domain(domain: &str) -> bool {
    const ALLOWED_CHARS: &str = "_-.";

    // If parsing the domain fails using Url, it will not work with reqwest.
    if let Err(parse_error) = url::Url::parse(format!("https://{}", domain).as_str()) {
    if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
        return false;
    } else if domain.is_empty()
@@ -258,9 +258,15 @@ mod tests {
    }
}

#[derive(Debug, Clone)]
enum DomainBlacklistReason {
    Regex,
    IP,
}

use cached::proc_macro::cached;
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn is_domain_blacklisted(domain: &str) -> bool {
async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
    // First check the blacklist regex if there is a match.
    // This prevents the blocked domain(s) from being leaked via a DNS lookup.
    if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
@@ -284,7 +290,7 @@ async fn is_domain_blacklisted(domain: &str) -> bool {

        if is_match {
            debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
            return true;
            return Some(DomainBlacklistReason::Regex);
        }
    }

@@ -293,13 +299,13 @@ async fn is_domain_blacklisted(domain: &str) -> bool {
            for addr in s {
                if !is_global(addr.ip()) {
                    debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
                    return true;
                    return Some(DomainBlacklistReason::IP);
                }
            }
        }
    }

    false
    None
}

async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
@@ -564,8 +570,10 @@ async fn get_page(url: &str) -> Result<Response, Error> {
}

async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
    if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
        warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url);
    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
        None => (),
    }

    let mut client = CLIENT.get(url);
@@ -575,7 +583,7 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Err

    match client.send().await {
        Ok(c) => c.error_for_status().map_err(Into::into),
        Err(e) => err_silent!(format!("{}", e)),
        Err(e) => err_silent!(format!("{e}")),
    }
}

@@ -659,8 +667,10 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
}

async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
    if is_domain_blacklisted(domain).await {
        err_silent!("Domain is blacklisted", domain)
    match check_domain_blacklist_reason(domain).await {
        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
        None => (),
    }

    let icon_result = get_icon_url(domain).await?;
@@ -797,7 +807,7 @@ impl reqwest::cookie::CookieStore for Jar {
        let cookie_store = self.0.read().unwrap();
        let s = cookie_store
            .get_request_values(url)
            .map(|(name, value)| format!("{}={}", name, value))
            .map(|(name, value)| format!("{name}={value}"))
            .collect::<Vec<_>>()
            .join("; ");
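A subtlety in the icons hunk: the `#[cached]` attribute memoizes check_domain_blacklist_reason per domain (16 entries, 60-second TTL), so repeat lookups skip both the regex scan and the DNS resolution. A minimal sketch of the same pattern, assuming the cached crate with async support enabled; the function body is a stand-in:

use cached::proc_macro::cached;

// `convert` builds the cache key from the borrowed argument, `size` bounds
// the cache, and `time` expires entries after 60 seconds, exactly as above.
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn expensive_domain_check(domain: &str) -> bool {
    // Stand-in for the regex match plus DNS lookup performed above.
    domain.ends_with(".example")
}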
@@ -25,7 +25,7 @@ pub fn routes() -> Vec<Route> {
}

#[post("/connect/token", data = "<data>")]
async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn, ip: ClientIp) -> JsonResult {
async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult {
    let data: ConnectData = data.into_inner();

    let mut user_uuid: Option<String> = None;
@@ -45,14 +45,18 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
            _check_is_some(&data.device_name, "device_name cannot be blank")?;
            _check_is_some(&data.device_type, "device_type cannot be blank")?;

            _password_login(data, &mut user_uuid, &mut conn, &ip).await
            _password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
        }
        "client_credentials" => {
            _check_is_some(&data.client_id, "client_id cannot be blank")?;
            _check_is_some(&data.client_secret, "client_secret cannot be blank")?;
            _check_is_some(&data.scope, "scope cannot be blank")?;

            _api_key_login(data, &mut user_uuid, &mut conn, &ip).await
            _check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
            _check_is_some(&data.device_name, "device_name cannot be blank")?;
            _check_is_some(&data.device_type, "device_type cannot be blank")?;

            _api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
        }
        t => err!("Invalid type", t),
    };
@@ -64,14 +68,21 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
                    EventType::UserLoggedIn as i32,
                    &user_uuid,
                    client_header.device_type,
                    &ip.ip,
                    &client_header.ip.ip,
                    &mut conn,
                )
                .await;
            }
            Err(e) => {
                if let Some(ev) = e.get_event() {
                    log_user_event(ev.event as i32, &user_uuid, client_header.device_type, &ip.ip, &mut conn).await
                    log_user_event(
                        ev.event as i32,
                        &user_uuid,
                        client_header.device_type,
                        &client_header.ip.ip,
                        &mut conn,
                    )
                    .await
                }
            }
        }
@@ -96,7 +107,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
    device.save(conn).await?;

    Ok(Json(json!({
    let mut result = json!({
        "access_token": access_token,
        "expires_in": expires_in,
        "token_type": "Bearer",
@@ -109,7 +120,16 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
        "scope": scope,
        "unofficialServer": true,
    })))
    });

    if user.client_kdf_type == UserKdfType::Argon2id as i32 {
        result["KdfMemory"] =
            Value::Number(user.client_kdf_memory.expect("Argon2 memory parameter is required.").into());
        result["KdfParallelism"] =
            Value::Number(user.client_kdf_parallelism.expect("Argon2 parallelism parameter is required.").into());
    }

    Ok(Json(result))
}

async fn _password_login(
@@ -130,7 +150,7 @@ async fn _password_login(

    // Get the user
    let username = data.username.as_ref().unwrap().trim();
    let user = match User::find_by_mail(username, conn).await {
    let mut user = match User::find_by_mail(username, conn).await {
        Some(user) => user,
        None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
    };
@@ -150,6 +170,16 @@ async fn _password_login(
        )
    }

    // Change the KDF Iterations
    if user.password_iterations != CONFIG.password_iterations() {
        user.password_iterations = CONFIG.password_iterations();
        user.set_password(password, None, false, None);

        if let Err(e) = user.save(conn).await {
            error!("Error updating user: {:#?}", e);
        }
    }

    // Check if the user is disabled
    if !user.enabled {
        err!(
@@ -172,7 +202,6 @@ async fn _password_login(
        if resend_limit == 0 || user.login_verify_count < resend_limit {
            // We want to send another email verification if we require signups to verify
            // their email address, and we haven't sent them a reminder in a while...
            let mut user = user;
            user.last_verifying_at = Some(now);
            user.login_verify_count += 1;

@@ -240,6 +269,13 @@ async fn _password_login(
        result["TwoFactorToken"] = Value::String(token);
    }

    if user.client_kdf_type == UserKdfType::Argon2id as i32 {
        result["KdfMemory"] =
            Value::Number(user.client_kdf_memory.expect("Argon2 memory parameter is required.").into());
        result["KdfParallelism"] =
            Value::Number(user.client_kdf_parallelism.expect("Argon2 parallelism parameter is required.").into());
    }

    info!("User {} logged in successfully. IP: {}", username, ip.ip);
    Ok(Json(result))
}
@@ -324,7 +360,7 @@ async fn _api_key_login(

    // Note: No refresh_token is returned. The CLI just repeats the
    // client_credentials login flow when the existing token expires.
    Ok(Json(json!({
    let mut result = json!({
        "access_token": access_token,
        "expires_in": expires_in,
        "token_type": "Bearer",
@@ -336,7 +372,16 @@ async fn _api_key_login(
        "ResetMasterPassword": false, // TODO: Same as above
        "scope": scope,
        "unofficialServer": true,
    })))
    });

    if user.client_kdf_type == UserKdfType::Argon2id as i32 {
        result["KdfMemory"] =
            Value::Number(user.client_kdf_memory.expect("Argon2 memory parameter is required.").into());
        result["KdfParallelism"] =
            Value::Number(user.client_kdf_parallelism.expect("Argon2 parallelism parameter is required.").into());
    }

    Ok(Json(result))
}

/// Retrieves an existing device or creates a new device from ConnectData and the User
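The KdfMemory/KdfParallelism blocks added in this file mutate an already-built `json!` value by index, which is serde_json's way of appending fields after construction. A standalone sketch of the technique; the Argon2id numbers are illustrative, not vaultwarden defaults:

use serde_json::{json, Value};

// Append Argon2id parameters to an existing response object, as the login
// responses above do for Argon2id users.
fn main() {
    let mut result = json!({ "Kdf": 1, "KdfIterations": 3 });
    result["KdfMemory"] = Value::Number(64.into()); // MiB, illustrative
    result["KdfParallelism"] = Value::Number(4.into()); // lanes, illustrative
    assert_eq!(result["KdfMemory"], 64);
}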
@@ -10,8 +10,7 @@ use std::{
use chrono::NaiveDateTime;
use futures::{SinkExt, StreamExt};
use rmpv::Value;
use rocket::{serde::json::Json, Route};
use serde_json::Value as JsonValue;
use rocket::Route;
use tokio::{
    net::{TcpListener, TcpStream},
    sync::mpsc::Sender,
@@ -23,13 +22,12 @@ use tokio_tungstenite::{

use crate::{
    api::EmptyResult,
    auth::Headers,
    db::models::{Cipher, Folder, Send, User},
    Error, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![negotiate, websockets_err]
    routes![websockets_err]
}

#[get("/hub")]
@@ -51,29 +49,6 @@ fn websockets_err() -> EmptyResult {
    }
}

#[post("/hub/negotiate")]
fn negotiate(_headers: Headers) -> Json<JsonValue> {
    use crate::crypto;
    use data_encoding::BASE64URL;

    let conn_id = crypto::encode_random_bytes::<16>(BASE64URL);
    let mut available_transports: Vec<JsonValue> = Vec::new();

    if CONFIG.websocket_enabled() {
        available_transports.push(json!({"transport":"WebSockets", "transferFormats":["Text","Binary"]}));
    }

    // TODO: Implement transports
    // Rocket WS support: https://github.com/SergioBenitez/Rocket/issues/90
    // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
    // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
    // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
    Json(json!({
        "connectionId": conn_id,
        "availableTransports": available_transports
    }))
}

//
// Websockets server
//
@@ -164,12 +139,23 @@ impl WebSocketUsers {
        let data = create_update(
            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
            ut,
            None,
        );

        self.send_update(&user.uuid, &data).await;
    }

    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
    pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
        let data = create_update(
            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
            UpdateType::LogOut,
            acting_device_uuid,
        );

        self.send_update(&user.uuid, &data).await;
    }

    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder, acting_device_uuid: &String) {
        let data = create_update(
            vec![
                ("Id".into(), folder.uuid.clone().into()),
@@ -177,12 +163,19 @@ impl WebSocketUsers {
                ("RevisionDate".into(), serialize_date(folder.updated_at)),
            ],
            ut,
            Some(acting_device_uuid.into()),
        );

        self.send_update(&folder.user_uuid, &data).await;
    }

    pub async fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
    pub async fn send_cipher_update(
        &self,
        ut: UpdateType,
        cipher: &Cipher,
        user_uuids: &[String],
        acting_device_uuid: &String,
    ) {
        let user_uuid = convert_option(cipher.user_uuid.clone());
        let org_uuid = convert_option(cipher.organization_uuid.clone());

@@ -195,6 +188,7 @@ impl WebSocketUsers {
                ("RevisionDate".into(), serialize_date(cipher.updated_at)),
            ],
            ut,
            Some(acting_device_uuid.into()),
        );

        for uuid in user_uuids {
@@ -212,6 +206,7 @@ impl WebSocketUsers {
                ("RevisionDate".into(), serialize_date(send.revision_date)),
            ],
            ut,
            None,
        );

        for uuid in user_uuids {
@@ -228,14 +223,14 @@ impl WebSocketUsers {
    "ReceiveMessage", // Target
    [ // Arguments
        {
            "ContextId": "app_id",
            "ContextId": acting_device_uuid || Nil,
            "Type": ut as i32,
            "Payload": {}
        }
    ]
]
*/
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option<String>) -> Vec<u8> {
    use rmpv::Value as V;

    let value = V::Array(vec![
@@ -244,7 +239,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
        V::Nil,
        "ReceiveMessage".into(),
        V::Array(vec![V::Map(vec![
            ("ContextId".into(), "app_id".into()),
            ("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)),
            ("Type".into(), (ut as i32).into()),
            ("Payload".into(), payload.into()),
        ])]),
@@ -260,17 +255,17 @@ fn create_ping() -> Vec<u8> {
#[allow(dead_code)]
#[derive(Eq, PartialEq)]
pub enum UpdateType {
    CipherUpdate = 0,
    CipherCreate = 1,
    LoginDelete = 2,
    FolderDelete = 3,
    Ciphers = 4,
    SyncCipherUpdate = 0,
    SyncCipherCreate = 1,
    SyncLoginDelete = 2,
    SyncFolderDelete = 3,
    SyncCiphers = 4,

    Vault = 5,
    OrgKeys = 6,
    FolderCreate = 7,
    FolderUpdate = 8,
    CipherDelete = 9,
    SyncVault = 5,
    SyncOrgKeys = 6,
    SyncFolderCreate = 7,
    SyncFolderUpdate = 8,
    SyncCipherDelete = 9,
    SyncSettings = 10,

    LogOut = 11,
@@ -279,6 +274,9 @@ pub enum UpdateType {
    SyncSendUpdate = 13,
    SyncSendDelete = 14,

    AuthRequest = 15,
    AuthRequestResponse = 16,

    None = 100,
}
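create_update above assembles a SignalR "ReceiveMessage" invocation as an rmpv value tree (the commented JSON shows its shape, now with the acting device's UUID as ContextId). A small sketch of how such a tree becomes the MessagePack bytes sent over the websocket, assuming the rmpv crate:

use rmpv::Value;

// Serialize an rmpv Value (like the one create_update builds) into
// MessagePack bytes; writing into a Vec cannot fail for I/O reasons.
fn to_msgpack_bytes(value: &Value) -> Vec<u8> {
    let mut buf = Vec::new();
    rmpv::encode::write_value(&mut buf, value).expect("Vec writes do not fail");
    buf
}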
@@ -4,7 +4,7 @@ use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Htm
use serde_json::Value;

use crate::{
    api::{core::now, ApiResult},
    api::{core::now, ApiResult, EmptyResult},
    error::Error,
    util::{Cached, SafeString},
    CONFIG,
@@ -14,9 +14,9 @@ pub fn routes() -> Vec<Route> {
    // If addding more routes here, consider also adding them to
    // crate::utils::LOGGED_ROUTES to make sure they appear in the log
    if CONFIG.web_vault_enabled() {
        routes![web_index, app_id, web_files, attachments, alive, static_files]
        routes![web_index, web_index_head, app_id, web_files, attachments, alive, alive_head, static_files]
    } else {
        routes![attachments, alive, static_files]
        routes![attachments, alive, alive_head, static_files]
    }
}

@@ -43,6 +43,17 @@ async fn web_index() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
}

#[head("/")]
fn web_index_head() -> EmptyResult {
    // Add an explicit HEAD route to prevent uptime monitoring services from
    // generating "No matching routes for HEAD /" error messages.
    //
    // Rocket automatically implements a HEAD route when there's a matching GET
    // route, but relying on this behavior also means a spurious error gets
    // logged due to <https://github.com/SergioBenitez/Rocket/issues/1098>.
    Ok(())
}

#[get("/app-id.json")]
fn app_id() -> Cached<(ContentType, Json<Value>)> {
    let content_type = ContentType::new("application", "fido.trusted-apps+json");
@@ -92,6 +103,13 @@ fn alive(_conn: DbConn) -> Json<String> {
    now()
}

#[head("/alive")]
fn alive_head(_conn: DbConn) -> EmptyResult {
    // Avoid logging spurious "No matching routes for HEAD /alive" errors
    // due to <https://github.com/SergioBenitez/Rocket/issues/1098>.
    Ok(())
}

#[get("/vw_static/<filename>")]
pub fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Error> {
    match filename.as_ref() {
@@ -102,14 +120,25 @@ pub fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Er
        "hibp.png" => Ok((ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
        "vaultwarden-icon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))),
        "vaultwarden-favicon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-favicon.png"))),
        "404.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/404.css"))),
        "admin.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/admin.css"))),
        "admin.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin.js"))),
        "admin_settings.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_settings.js"))),
        "admin_users.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_users.js"))),
        "admin_organizations.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_organizations.js")))
        }
        "admin_diagnostics.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_diagnostics.js")))
        }
        "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
        "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.2.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.2.slim.js")))
        "jquery-3.6.3.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.3.slim.js")))
        }
        _ => err!(format!("Static file not found: {}", filename)),
        _ => err!(format!("Static file not found: {filename}")),
    }
}
src/auth.rs (81 changes)
@@ -25,16 +25,16 @@ static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.
static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));

static PRIVATE_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
    std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{e}"))
});
static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
    EncodingKey::from_rsa_pem(&PRIVATE_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{}", e))
    EncodingKey::from_rsa_pem(&PRIVATE_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
});
static PUBLIC_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
    std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{e}"))
});
static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
    DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{}", e))
    DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
});

pub fn load_keys() {
@@ -45,7 +45,7 @@ pub fn load_keys() {
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
        Ok(token) => token,
        Err(e) => panic!("Error encoding jwt {}", e),
        Err(e) => panic!("Error encoding jwt {e}"),
    }
}

@@ -241,7 +241,7 @@ pub fn generate_admin_claims() -> BasicJwtClaims {
    let time_now = Utc::now().naive_utc();
    BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::minutes(20)).timestamp(),
        exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
        iss: JWT_ADMIN_ISSUER.to_string(),
        sub: "admin_panel".to_string(),
    }
@@ -253,7 +253,7 @@ pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::minutes(2)).timestamp(),
        iss: JWT_SEND_ISSUER.to_string(),
        sub: format!("{}/{}", send_id, file_id),
        sub: format!("{send_id}/{file_id}"),
    }
}

@@ -306,7 +306,7 @@ impl<'r> FromRequest<'r> for Host {
                ""
            };

            format!("{}://{}", protocol, host)
            format!("{protocol}://{host}")
        };

        Outcome::Success(Host {
@@ -318,6 +318,7 @@ impl<'r> FromRequest<'r> for Host {
pub struct ClientHeaders {
    pub host: String,
    pub device_type: i32,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -326,6 +327,10 @@ impl<'r> FromRequest<'r> for ClientHeaders {

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let host = try_outcome!(Host::from_request(request).await).host;
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => err_handler!("Error getting Client IP"),
        };
        // When unknown or unable to parse, return 14, which is 'Unknown Browser'
        let device_type: i32 =
            request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);
@@ -333,6 +338,7 @@ impl<'r> FromRequest<'r> for ClientHeaders {
        Outcome::Success(ClientHeaders {
            host,
            device_type,
            ip,
        })
    }
}
@@ -341,6 +347,7 @@ pub struct Headers {
    pub host: String,
    pub device: Device,
    pub user: User,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -351,6 +358,10 @@ impl<'r> FromRequest<'r> for Headers {
        let headers = request.headers();

        let host = try_outcome!(Host::from_request(request).await).host;
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => err_handler!("Error getting Client IP"),
        };

        // Get access_token
        let access_token: &str = match headers.get_one("Authorization") {
@@ -420,6 +431,7 @@ impl<'r> FromRequest<'r> for Headers {
            host,
            device,
            user,
            ip,
        })
    }
}
@@ -431,6 +443,7 @@ pub struct OrgHeaders {
    pub org_user_type: UserOrgType,
    pub org_user: UserOrganization,
    pub org_id: String,
    pub ip: ClientIp,
}

// org_id is usually the second path param ("/organizations/<org_id>"),
@@ -491,6 +504,7 @@ impl<'r> FromRequest<'r> for OrgHeaders {
                    },
                    org_user,
                    org_id,
                    ip: headers.ip,
                })
            }
            _ => err_handler!("Error getting the organization id"),
@@ -504,6 +518,7 @@ pub struct AdminHeaders {
    pub user: User,
    pub org_user_type: UserOrgType,
    pub client_version: Option<String>,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -520,6 +535,7 @@ impl<'r> FromRequest<'r> for AdminHeaders {
                user: headers.user,
                org_user_type: headers.org_user_type,
                client_version,
                ip: headers.ip,
            })
        } else {
            err_handler!("You need to be Admin or Owner to call this endpoint")
@@ -533,6 +549,7 @@ impl From<AdminHeaders> for Headers {
            host: h.host,
            device: h.device,
            user: h.user,
            ip: h.ip,
        }
    }
}
@@ -564,6 +581,7 @@ pub struct ManagerHeaders {
    pub device: Device,
    pub user: User,
    pub org_user_type: UserOrgType,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -580,14 +598,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                        _ => err_handler!("Error getting DB"),
                    };

                    if !headers.org_user.has_full_access()
                        && !Collection::has_access_by_collection_and_user_uuid(
                            &col_id,
                            &headers.org_user.user_uuid,
                            &mut conn,
                        )
                        .await
                    {
                    if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
                        err_handler!("The current user isn't a manager for this collection")
                    }
                }
@@ -599,6 +610,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
                ip: headers.ip,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
@@ -612,6 +624,7 @@ impl From<ManagerHeaders> for Headers {
            host: h.host,
            device: h.device,
            user: h.user,
            ip: h.ip,
        }
    }
}
@@ -622,7 +635,9 @@ pub struct ManagerHeadersLoose {
    pub host: String,
    pub device: Device,
    pub user: User,
    pub org_user: UserOrganization,
    pub org_user_type: UserOrgType,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -636,7 +651,9 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user: headers.org_user,
                org_user_type: headers.org_user_type,
                ip: headers.ip,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
@@ -650,14 +667,45 @@ impl From<ManagerHeadersLoose> for Headers {
            host: h.host,
            device: h.device,
            user: h.user,
            ip: h.ip,
        }
    }
}
async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
    org_user.has_full_access()
        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
}

impl ManagerHeaders {
    pub async fn from_loose(
        h: ManagerHeadersLoose,
        collections: &Vec<String>,
        conn: &mut DbConn,
    ) -> Result<ManagerHeaders, Error> {
        for col_id in collections {
            if uuid::Uuid::parse_str(col_id).is_err() {
                err!("Collection Id is malformed!");
            }
            if !can_access_collection(&h.org_user, col_id, conn).await {
                err!("You don't have access to all collections!");
            }
        }

        Ok(ManagerHeaders {
            host: h.host,
            device: h.device,
            user: h.user,
            org_user_type: h.org_user_type,
            ip: h.ip,
        })
    }
}

pub struct OwnerHeaders {
    pub host: String,
    pub device: Device,
    pub user: User,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -671,6 +719,7 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                ip: headers.ip,
            })
        } else {
            err_handler!("You need to be Owner to call this endpoint")
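The running theme of this auth.rs diff is folding ClientIp into every request guard so handlers stop taking it as a separate parameter. A minimal sketch of a Rocket guard capturing the client IP the same way, assuming Rocket 0.5-rc; the type names here are illustrative, not vaultwarden's:

use rocket::request::{FromRequest, Outcome, Request};
use std::net::IpAddr;

struct ClientIp(IpAddr);
struct MyHeaders {
    ip: ClientIp,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for MyHeaders {
    type Error = ();

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        // request.client_ip() consults the connection and any configured
        // proxy headers; fall back to 0.0.0.0 for the sketch.
        let ip = request.client_ip().unwrap_or(IpAddr::from([0, 0, 0, 0]));
        Outcome::Success(MyHeaders {
            ip: ClientIp(ip),
        })
    }
}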
src/config.rs (182 changes)
@@ -1,3 +1,4 @@
|
||||
use std::env::consts::EXE_SUFFIX;
|
||||
use std::process::exit;
|
||||
use std::sync::RwLock;
|
||||
|
||||
@@ -13,12 +14,12 @@ use crate::{
|
||||
|
||||
static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
|
||||
let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data"));
|
||||
get_env("CONFIG_FILE").unwrap_or_else(|| format!("{}/config.json", data_folder))
|
||||
get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
|
||||
});
|
||||
|
||||
pub static CONFIG: Lazy<Config> = Lazy::new(|| {
|
||||
Config::load().unwrap_or_else(|e| {
|
||||
println!("Error loading config:\n\t{:?}\n", e);
|
||||
println!("Error loading config:\n {e:?}\n");
|
||||
exit(12)
|
||||
})
|
||||
});
|
||||
@@ -60,25 +61,37 @@ macro_rules! make_config {
|
||||
impl ConfigBuilder {
|
||||
#[allow(clippy::field_reassign_with_default)]
|
||||
fn from_env() -> Self {
|
||||
match dotenvy::from_path(get_env("ENV_FILE").unwrap_or_else(|| String::from(".env"))) {
|
||||
Ok(_) => (),
|
||||
let env_file = get_env("ENV_FILE").unwrap_or_else(|| String::from(".env"));
|
||||
match dotenvy::from_path(&env_file) {
|
||||
Ok(_) => {
|
||||
println!("[INFO] Using environment file `{env_file}` for configuration.\n");
|
||||
},
|
||||
Err(e) => match e {
|
||||
dotenvy::Error::LineParse(msg, pos) => {
|
||||
panic!("Error loading the .env file:\nNear {:?} on position {}\nPlease fix and restart!\n", msg, pos);
|
||||
println!("[ERROR] Failed parsing environment file: `{env_file}`\nNear {msg:?} on position {pos}\nPlease fix and restart!\n");
|
||||
exit(255);
|
||||
},
|
||||
dotenvy::Error::Io(ioerr) => match ioerr.kind() {
|
||||
std::io::ErrorKind::NotFound => {
|
||||
println!("[INFO] No .env file found.\n");
|
||||
// Only exit if this environment variable is set, but the file was not found.
|
||||
// This prevents incorrectly configured environments.
|
||||
if let Some(env_file) = get_env::<String>("ENV_FILE") {
|
||||
println!("[ERROR] The configured ENV_FILE `{env_file}` was not found!\n");
|
||||
exit(255);
|
||||
}
|
||||
},
|
||||
std::io::ErrorKind::PermissionDenied => {
|
||||
println!("[WARNING] Permission Denied while trying to read the .env file!\n");
|
||||
println!("[ERROR] Permission denied while trying to read environment file `{env_file}`!\n");
|
||||
exit(255);
|
||||
},
|
||||
_ => {
|
||||
println!("[WARNING] Reading the .env file failed:\n{:?}\n", ioerr);
|
||||
println!("[ERROR] Reading environment file `{env_file}` failed:\n{ioerr:?}\n");
|
||||
exit(255);
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
println!("[WARNING] Reading the .env file failed:\n{:?}\n", e);
|
||||
println!("[ERROR] Reading environment file `{env_file}` failed:\n{e:?}\n");
|
||||
exit(255);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -93,6 +106,7 @@ macro_rules! make_config {
|
||||
|
||||
fn from_file(path: &str) -> Result<Self, Error> {
|
||||
let config_str = std::fs::read_to_string(path)?;
|
||||
println!("[INFO] Using saved config from `{path}` for configuration.\n");
|
||||
serde_json::from_str(&config_str).map_err(Into::into)
|
||||
}
|
||||
|
||||
@@ -112,8 +126,8 @@ macro_rules! make_config {
|
||||
|
||||
if show_overrides && !overrides.is_empty() {
|
||||
// We can't use warn! here because logging isn't setup yet.
|
||||
println!("[WARNING] The following environment variables are being overriden by the config file,");
|
||||
println!("[WARNING] please use the admin panel to make changes to them:");
|
||||
println!("[WARNING] The following environment variables are being overriden by the config.json file.");
|
||||
println!("[WARNING] Please use the admin panel to make changes to them:");
|
||||
println!("[WARNING] {}\n", overrides.join(", "));
|
||||
}
|
||||
|
||||
@@ -128,6 +142,8 @@ macro_rules! make_config {
|
||||
)+)+
|
||||
config.domain_set = _domain_set;
|
||||
|
||||
config.domain = config.domain.trim_end_matches('/').to_string();
|
||||
|
||||
config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
|
||||
config.org_creation_users = config.org_creation_users.trim().to_lowercase();
|
||||
|
||||
@@ -450,9 +466,9 @@ make_config! {
|
||||
invitation_expiration_hours: u32, false, def, 120;
|
||||
/// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
|
||||
emergency_access_allowed: bool, true, def, true;
|
||||
/// Password iterations |> Number of server-side passwords hashing iterations.
|
||||
/// The changes only apply when a user changes their password. Not recommended to lower the value
|
||||
password_iterations: i32, true, def, 100_000;
|
||||
/// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
|
||||
/// The default for new users. If changed, it will be updated during login for existing users.
|
||||
password_iterations: i32, true, def, 600_000;
|
||||
/// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
|
||||
password_hints_allowed: bool, true, def, true;
|
||||
/// Show password hint |> Controls whether a password hint should be shown directly in the web page
|
||||
@@ -565,6 +581,9 @@ make_config! {
|
||||
/// Max burst size for admin login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `admin_ratelimit_seconds`
|
||||
admin_ratelimit_max_burst: u32, false, def, 3;
|
||||
|
||||
/// Admin session lifetime |> Set the lifetime of admin sessions to this value (in minutes).
|
||||
admin_session_lifetime: i64, true, def, 20;
|
||||
|
||||
/// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
|
||||
org_groups_enabled: bool, false, def, false;
|
||||
},
|
||||
@@ -599,6 +618,10 @@ make_config! {
|
||||
smtp: _enable_smtp {
|
||||
/// Enabled
|
||||
_enable_smtp: bool, true, def, true;
|
||||
/// Use Sendmail |> Whether to send mail via the `sendmail` command
|
||||
use_sendmail: bool, true, def, false;
|
||||
/// Sendmail Command |> Which sendmail command to use. The one found in the $PATH is used if not specified.
|
||||
sendmail_command: String, true, option;
|
||||
/// Host
|
||||
smtp_host: String, true, option;
|
||||
/// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY
|
||||
@@ -638,7 +661,7 @@ make_config! {
|
||||
/// Email 2FA Settings
|
||||
email_2fa: _enable_email_2fa {
|
||||
/// Enabled |> Disabling will prevent users from setting up new email 2FA and using existing email 2FA configured
|
||||
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && c.smtp_host.is_some();
|
||||
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && (c.smtp_host.is_some() || c.use_sendmail);
|
||||
/// Email token size |> Number of digits in an email 2FA token (min: 6, max: 255). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
|
||||
email_token_size: u8, true, def, 6;
|
||||
/// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
|
||||
@@ -660,9 +683,19 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.password_iterations < 100_000 {
|
||||
err!("PASSWORD_ITERATIONS should be at least 100000 or higher. The default is 600000!");
|
||||
}
|
||||
|
||||
let limit = 256;
|
||||
if cfg.database_max_conns < 1 || cfg.database_max_conns > limit {
|
||||
err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.", limit,));
|
||||
err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {limit}.",));
|
||||
}
|
||||
|
||||
if let Some(log_file) = &cfg.log_file {
|
||||
if std::fs::OpenOptions::new().append(true).create(true).open(log_file).is_err() {
|
||||
err!("Unable to write to log file", log_file);
|
||||
}
|
||||
}
|
||||
|
||||
let dom = cfg.domain.to_lowercase();
|
||||
@@ -698,8 +731,17 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
err!("All Duo options need to be set for global Duo support")
|
||||
}
|
||||
|
||||
if cfg._enable_yubico && cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
|
||||
err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` need to be set for Yubikey OTP support")
|
||||
if cfg._enable_yubico {
|
||||
if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
|
||||
err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` must be set for Yubikey OTP support")
|
||||
}
|
||||
|
||||
if let Some(yubico_server) = &cfg.yubico_server {
|
||||
let yubico_server = yubico_server.to_lowercase();
|
||||
if !yubico_server.starts_with("https://") {
|
||||
err!("`YUBICO_SERVER` must be a valid URL and start with 'https://'. Either unset this variable or provide a valid URL.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cfg._enable_smtp {
|
||||
@@ -710,33 +752,68 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
            ),
        }

        if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
            err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
        if cfg.use_sendmail {
            let command = cfg.sendmail_command.clone().unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}"));

            let mut path = std::path::PathBuf::from(&command);

            if !path.is_absolute() {
                match which::which(&command) {
                    Ok(result) => path = result,
                    Err(_) => err!(format!("sendmail command {command:?} not found in $PATH")),
                }
            }

            match path.metadata() {
                Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
                    err!(format!("sendmail command not found at `{path:?}`"))
                }
                Err(err) => {
                    err!(format!("failed to access sendmail command at `{path:?}`: {err}"))
                }
                Ok(metadata) => {
                    if metadata.is_dir() {
                        err!(format!("sendmail command at `{path:?}` is a directory, not an executable"));
                    }

                    #[cfg(unix)]
                    {
                        use std::os::unix::fs::PermissionsExt;
                        if metadata.permissions().mode() & 0o111 == 0 {
                            err!(format!("sendmail command at `{path:?}` isn't executable"));
                        }
                    }
                }
            }
        } else {
            if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
                err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support without `USE_SENDMAIL`")
            }

            if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
                err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication without `USE_SENDMAIL`")
            }
        }

        if cfg.smtp_host.is_some() && !cfg.smtp_from.contains('@') {
        if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !cfg.smtp_from.contains('@') {
            err!("SMTP_FROM does not contain a mandatory @ sign")
        }

        if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
            err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
        }

        if cfg._enable_email_2fa && (!cfg._enable_smtp || cfg.smtp_host.is_none()) {
            err!("To enable email 2FA, SMTP must be configured")
        }

        if cfg._enable_email_2fa && cfg.email_token_size < 6 {
            err!("`EMAIL_TOKEN_SIZE` has a minimum size of 6")
        }
    }

    if cfg._enable_email_2fa && !(cfg.smtp_host.is_some() || cfg.use_sendmail) {
        err!("To enable email 2FA, a mail transport must be configured")
    }

    // Check if the icon blacklist regex is valid
    if let Some(ref r) = cfg.icon_blacklist_regex {
        let validate_regex = regex::Regex::new(r);
        match validate_regex {
            Ok(_) => (),
            Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {:#?}", e)),
            Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")),
        }
    }
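For context, the executable test above reduces to checking the Unix permission bits on the resolved path. A minimal standalone sketch of the same check (Unix only; `is_executable` is an illustrative helper name, not part of the codebase):

    use std::path::Path;

    /// Returns true when the path is a regular file with at least one
    /// execute bit (owner, group or other) set.
    #[cfg(unix)]
    fn is_executable(path: &Path) -> std::io::Result<bool> {
        use std::os::unix::fs::PermissionsExt;
        let metadata = path.metadata()?;
        Ok(!metadata.is_dir() && metadata.permissions().mode() & 0o111 != 0)
    }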
@@ -746,12 +823,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
        "internal" | "bitwarden" | "duckduckgo" | "google" => (),
        _ => {
            if !icon_service.starts_with("http") {
                err!(format!("Icon service URL `{}` must start with \"http\"", icon_service))
                err!(format!("Icon service URL `{icon_service}` must start with \"http\""))
            }
            match icon_service.matches("{}").count() {
                1 => (), // nominal
                0 => err!(format!("Icon service URL `{}` has no placeholder \"{{}}\"", icon_service)),
                _ => err!(format!("Icon service URL `{}` has more than one placeholder \"{{}}\"", icon_service)),
                0 => err!(format!("Icon service URL `{icon_service}` has no placeholder \"{{}}\"")),
                _ => err!(format!("Icon service URL `{icon_service}` has more than one placeholder \"{{}}\"")),
            }
        }
    }
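The placeholder check relies on str::matches(), which yields the non-overlapping occurrences of a pattern. A quick illustration (the URL is hypothetical, not a shipped default):

    let icon_service = "https://icons.example.com/{}/icon.png";
    // Exactly one "{}" is required; it is later substituted with the queried domain.
    assert_eq!(icon_service.matches("{}").count(), 1);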
@@ -795,6 +872,23 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
        err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression")
    }

    if !cfg.disable_admin_token {
        match cfg.admin_token.as_ref() {
            Some(t) if t.starts_with("$argon2") => {
                if let Err(e) = argon2::password_hash::PasswordHash::new(t) {
                    err!(format!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: '{e}'"))
                }
            }
            Some(_) => {
                println!(
                    "[NOTICE] You are using a plain text `ADMIN_TOKEN` which is insecure.\n\
                    Please generate a secure Argon2 PHC string by using `vaultwarden hash` or `argon2`.\n\
                    See: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token\n"
                );
            }
            _ => {}
        }
    }
    Ok(())
}
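Validating the token amounts to parsing the PHC string. A minimal sketch using the argon2 crate (the helper name is illustrative):

    use argon2::password_hash::PasswordHash;

    /// Accepts strings like "$argon2id$v=19$m=65540,t=3,p=4$<salt>$<hash>".
    fn is_valid_argon2_phc(token: &str) -> bool {
        token.starts_with("$argon2") && PasswordHash::new(token).is_ok()
    }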
@@ -803,7 +897,7 @@ fn extract_url_origin(url: &str) -> String {
    match Url::parse(url) {
        Ok(u) => u.origin().ascii_serialization(),
        Err(e) => {
            println!("Error validating domain: {}", e);
            println!("Error validating domain: {e}");
            String::new()
        }
    }
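The url crate's Origin type keeps only scheme, host and port. For example:

    use url::Url;

    let u = Url::parse("https://vault.example.com:8443/some/path?q=1").unwrap();
    // Path, query and fragment are dropped; non-default ports are kept.
    assert_eq!(u.origin().ascii_serialization(), "https://vault.example.com:8443");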
@@ -1011,7 +1105,7 @@ impl Config {
    }
    pub fn mail_enabled(&self) -> bool {
        let inner = &self.inner.read().unwrap().config;
        inner._enable_smtp && inner.smtp_host.is_some()
        inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
    }

    pub fn get_duo_akey(&self) -> String {
@@ -1086,6 +1180,7 @@ where
    // Register helpers
    hb.register_helper("case", Box::new(case_helper));
    hb.register_helper("jsesc", Box::new(js_escape_helper));
    hb.register_helper("to_json", Box::new(to_json));

    macro_rules! reg {
        ($name:expr) => {{
@@ -1103,6 +1198,7 @@ where
    reg!("email/email_footer");
    reg!("email/email_footer_text");

    reg!("email/admin_reset_password", ".html");
    reg!("email/change_email", ".html");
    reg!("email/delete_account", ".html");
    reg!("email/emergency_access_invite_accepted", ".html");
@@ -1177,9 +1273,23 @@ fn js_escape_helper<'reg, 'rc>(

    let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
    if !no_quote {
        escaped_value = format!("&quot;{}&quot;", escaped_value);
        escaped_value = format!("&quot;{escaped_value}&quot;");
    }

    out.write(&escaped_value)?;
    Ok(())
}

fn to_json<'reg, 'rc>(
    h: &Helper<'reg, 'rc>,
    _r: &'reg Handlebars<'_>,
    _ctx: &'rc Context,
    _rc: &mut RenderContext<'reg, 'rc>,
    out: &mut dyn Output,
) -> HelperResult {
    let param = h.param(0).ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))?.value();
    let json = serde_json::to_string(param)
        .map_err(|e| RenderError::new(format!("Can't serialize parameter to JSON: {e}")))?;
    out.write(&json)?;
    Ok(())
}
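The new to_json helper serializes whatever value it is handed straight into the template output. A usage sketch (template string and data are illustrative; exact output depends on handlebars' escaping rules):

    use handlebars::Handlebars;
    use serde_json::json;

    let mut hb = Handlebars::new();
    hb.register_helper("to_json", Box::new(to_json)); // the helper defined above
    hb.register_template_string("t", "const data = {{to_json this}};").unwrap();
    let rendered = hb.render("t", &json!({"Name": "example"})).unwrap();
    // rendered: const data = {"Name":"example"};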
@@ -75,12 +75,10 @@ macro_rules! generate_connections {
        #[cfg($name)]
        impl CustomizeConnection<$ty, diesel::r2d2::Error> for DbConnOptions {
            fn on_acquire(&self, conn: &mut $ty) -> Result<(), diesel::r2d2::Error> {
                (|| {
                    if !self.init_stmts.is_empty() {
                        conn.batch_execute(&self.init_stmts)?;
                    }
                    Ok(())
                })().map_err(diesel::r2d2::Error::QueryError)
                if !self.init_stmts.is_empty() {
                    conn.batch_execute(&self.init_stmts).map_err(diesel::r2d2::Error::QueryError)?;
                }
                Ok(())
            }
        })+

@@ -97,7 +95,7 @@ macro_rules! generate_connections {

        impl Drop for DbConn {
            fn drop(&mut self) {
                let conn = self.conn.clone();
                let conn = Arc::clone(&self.conn);
                let permit = self.permit.take();

                // Since connection can't be on the stack in an async fn during an
@@ -143,21 +141,20 @@ macro_rules! generate_connections {
                    }))
                    .build(manager)
                    .map_res("Failed to create pool")?;
                return Ok(DbPool {
                Ok(DbPool {
                    pool: Some(DbPoolInner::$name(pool)),
                    semaphore: Arc::new(Semaphore::new(CONFIG.database_max_conns() as usize)),
                });
                })
            }
            #[cfg(not($name))]
            #[allow(unreachable_code)]
            return unreachable!("Trying to use a DB backend when its feature is disabled");
            unreachable!("Trying to use a DB backend when its feature is disabled")
        },
        )+ }
    }
    // Get a connection from the pool
    pub async fn get(&self) -> Result<DbConn, Error> {
        let duration = Duration::from_secs(CONFIG.database_timeout());
        let permit = match timeout(duration, self.semaphore.clone().acquire_owned()).await {
        let permit = match timeout(duration, Arc::clone(&self.semaphore).acquire_owned()).await {
            Ok(p) => p.expect("Semaphore should be open"),
            Err(_) => {
                err!("Timeout waiting for database connection");
@@ -170,10 +167,10 @@ macro_rules! generate_connections {
            let pool = p.clone();
            let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?;

            return Ok(DbConn {
            Ok(DbConn {
                conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))),
                permit: Some(permit)
            });
            })
        },
        )+ }
    }
@@ -383,7 +380,7 @@ pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
        let db_url = CONFIG.database_url();
        let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
        let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
        diesel::sql_query(format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)).execute(conn)?;
        diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?;
        Ok(())
    }
}
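The pool pairs r2d2 with a Tokio Semaphore so that at most DATABASE_MAX_CONNS tasks hold a connection at once, and acquisition is bounded by DATABASE_TIMEOUT. A simplified, self-contained sketch of that pattern (names are illustrative):

    use std::sync::Arc;
    use tokio::sync::{OwnedSemaphorePermit, Semaphore};
    use tokio::time::{timeout, Duration};

    async fn acquire_slot(sem: &Arc<Semaphore>, secs: u64) -> Option<OwnedSemaphorePermit> {
        // acquire_owned() takes an Arc<Semaphore>, hence the explicit Arc::clone().
        match timeout(Duration::from_secs(secs), Arc::clone(sem).acquire_owned()).await {
            Ok(Ok(permit)) => Some(permit), // dropping the permit frees the slot again
            _ => None,                      // timed out, or the semaphore was closed
        }
    }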
@@ -187,10 +187,15 @@ impl Attachment {
        }}
    }

    pub async fn find_all_by_ciphers(cipher_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
    // This will return all attachments linked to the user or org
    // There is no filtering done here if the user actually has access!
    // It is used to speed up the sync process, and the matching is done in a different part.
    pub async fn find_all_by_user_and_orgs(user_uuid: &str, org_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            attachments::table
                .filter(attachments::cipher_uuid.eq_any(cipher_uuids))
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::user_uuid.eq(user_uuid))
                .or_filter(ciphers::organization_uuid.eq_any(org_uuids))
                .select(attachments::all_columns)
                .load::<AttachmentDb>(conn)
                .expect("Error loading attachments")
@@ -6,7 +6,7 @@ use super::{
    Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
};

use crate::api::core::CipherSyncData;
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};

use std::borrow::Cow;

@@ -73,6 +73,33 @@ impl Cipher {
            reprompt: None,
        }
    }

    pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
        let mut validation_errors = serde_json::Map::new();
        for (index, cipher) in cipher_data.iter().enumerate() {
            if let Some(note) = &cipher.Notes {
                if note.len() > 10_000 {
                    validation_errors.insert(
                        format!("Ciphers[{index}].Notes"),
                        serde_json::to_value([
                            "The field Notes exceeds the maximum encrypted value length of 10000 characters.",
                        ])
                        .unwrap(),
                    );
                }
            }
        }
        if !validation_errors.is_empty() {
            let err_json = json!({
                "message": "The model state is invalid.",
                "validationErrors" : validation_errors,
                "object": "error"
            });
            err_json!(err_json, "Import validation errors")
        } else {
            Ok(())
        }
    }
}

use crate::db::DbConn;
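On failure, the response body mirrors the validation-error shape the official server emits, so clients can attach the message to the offending item. For a too-long note on the fourth imported cipher, the resulting JSON looks roughly like:

    {
      "message": "The model state is invalid.",
      "validationErrors": {
        "Ciphers[3].Notes": [
          "The field Notes exceeds the maximum encrypted value length of 10000 characters."
        ]
      },
      "object": "error"
    }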
@@ -87,6 +114,7 @@ impl Cipher {
        host: &str,
        user_uuid: &str,
        cipher_sync_data: Option<&CipherSyncData>,
        sync_type: CipherSyncType,
        conn: &mut DbConn,
    ) -> Value {
        use crate::util::format_date;
@@ -107,17 +135,24 @@ impl Cipher {
        let password_history_json =
            self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

        let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
            Some((ro, hp)) => (ro, hp),
            None => {
                error!("Cipher ownership assertion failure");
                (true, true)
        // We don't need these values at all for Organizational syncs
        // Skip any other database calls if this is the case and just return false.
        let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
            match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
                Some((ro, hp)) => (ro, hp),
                None => {
                    error!("Cipher ownership assertion failure");
                    (true, true)
                }
            }
        } else {
            (false, false)
        };

        // Get the type_data or a default to an empty json object '{}'.
        // If not passing an empty object, mobile clients will crash.
        let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({}));
        let mut type_data_json: Value =
            serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));

        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // Set the first element of the Uris array as Uri, this is needed by several (mobile) clients.
@@ -136,10 +171,10 @@ impl Cipher {

        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // data_json should always contain the following keys with every atype
        data_json["Fields"] = json!(fields_json);
        data_json["Fields"] = fields_json.clone();
        data_json["Name"] = json!(self.name);
        data_json["Notes"] = json!(self.notes);
        data_json["PasswordHistory"] = json!(password_history_json);
        data_json["PasswordHistory"] = password_history_json.clone();

        let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
            if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
@@ -165,8 +200,6 @@ impl Cipher {
            "CreationDate": format_date(&self.created_at),
            "RevisionDate": format_date(&self.updated_at),
            "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
            "FolderId": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string() ) } else { self.get_folder_uuid(user_uuid, conn).await },
            "Favorite": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_favorites.contains(&self.uuid) } else { self.is_favorite(user_uuid, conn).await },
            "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
            "OrganizationId": self.organization_uuid,
            "Attachments": attachments_json,
@@ -183,12 +216,6 @@ impl Cipher {

            "Data": data_json,

            // These values are true by default, but can be false if the
            // cipher belongs to a collection where the org owner has enabled
            // the "Read Only" or "Hide Passwords" restrictions for the user.
            "Edit": !read_only,
            "ViewPassword": !hide_passwords,

            "PasswordHistory": password_history_json,

            // All Cipher types are included by default as null, but only the matching one will be populated
@@ -198,6 +225,27 @@ impl Cipher {
            "Identity": null,
        });

        // These values are only needed for user/default syncs
        // Not during an organizational sync like `get_org_details`
        // Skip adding these fields in that case
        if sync_type == CipherSyncType::User {
            json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
                cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
            } else {
                self.get_folder_uuid(user_uuid, conn).await
            });
            json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
                cipher_sync_data.cipher_favorites.contains(&self.uuid)
            } else {
                self.is_favorite(user_uuid, conn).await
            });
            // These values are true by default, but can be false if the
            // cipher belongs to a collection or group where the org owner has enabled
            // the "Read Only" or "Hide Passwords" restrictions for the user.
            json_object["Edit"] = json!(!read_only);
            json_object["ViewPassword"] = json!(!hide_passwords);
        }

        let key = match self.atype {
            1 => "Login",
            2 => "SecureNote",
@@ -713,6 +761,7 @@ impl Cipher {
            .or_filter(groups::access_all.eq(true)) //Access via group
            .or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group
            .select(ciphers_collections::all_columns)
            .distinct()
            .load::<(String, String)>(conn).unwrap_or_default()
        }}
    }
@@ -64,6 +64,8 @@ impl Collection {
            Some(_) => {
                if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
                    (uc.read_only, uc.hide_passwords)
                } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
                    (cg.read_only, cg.hide_passwords)
                } else {
                    (false, false)
                }
@@ -232,6 +234,17 @@ impl Collection {
        }}
    }

    pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! { conn: {
            collections::table
                .filter(collections::org_uuid.eq(org_uuid))
                .count()
                .first::<i64>(conn)
                .ok()
                .unwrap_or(0)
        }}
    }

    pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
@@ -287,47 +300,95 @@ impl Collection {
    }

    pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
            None => false, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {
                    return true;
                }

                db_run! { conn: {
                    users_collections::table
                        .filter(users_collections::collection_uuid.eq(&self.uuid))
                        .filter(users_collections::user_uuid.eq(user_uuid))
                        .filter(users_collections::read_only.eq(false))
                        .count()
                        .first::<i64>(conn)
                        .ok()
                        .unwrap_or(0) != 0
                }}
            }
        }
        let user_uuid = user_uuid.to_string();
        db_run! { conn: {
            collections::table
                .left_join(users_collections::table.on(
                    users_collections::collection_uuid.eq(collections::uuid).and(
                        users_collections::user_uuid.eq(user_uuid.clone())
                    )
                ))
                .left_join(users_organizations::table.on(
                    collections::org_uuid.eq(users_organizations::org_uuid).and(
                        users_organizations::user_uuid.eq(user_uuid)
                    )
                ))
                .left_join(groups_users::table.on(
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
                ))
                .left_join(groups::table.on(
                    groups::uuid.eq(groups_users::groups_uuid)
                ))
                .left_join(collections_groups::table.on(
                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
                        collections_groups::collections_uuid.eq(collections::uuid)
                    )
                ))
                .filter(collections::uuid.eq(&self.uuid))
                .filter(
                    users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or( // Directly accessed collection
                    users_organizations::access_all.eq(true).or( // access_all in Organization
                    users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
                    )).or(
                    groups::access_all.eq(true) // access_all in groups
                    ).or( // access via groups
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
                    collections_groups::collections_uuid.is_not_null().and(
                    collections_groups::read_only.eq(false))
                    )
                    )
                )
                .count()
                .first::<i64>(conn)
                .ok()
                .unwrap_or(0) != 0
        }}
    }

    pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
            None => true, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {
                    return false;
                }

                db_run! { conn: {
                    users_collections::table
                        .filter(users_collections::collection_uuid.eq(&self.uuid))
                        .filter(users_collections::user_uuid.eq(user_uuid))
                        .filter(users_collections::hide_passwords.eq(true))
                        .count()
                        .first::<i64>(conn)
                        .ok()
                        .unwrap_or(0) != 0
                }}
            }
        }
        let user_uuid = user_uuid.to_string();
        db_run! { conn: {
            collections::table
                .left_join(users_collections::table.on(
                    users_collections::collection_uuid.eq(collections::uuid).and(
                        users_collections::user_uuid.eq(user_uuid.clone())
                    )
                ))
                .left_join(users_organizations::table.on(
                    collections::org_uuid.eq(users_organizations::org_uuid).and(
                        users_organizations::user_uuid.eq(user_uuid)
                    )
                ))
                .left_join(groups_users::table.on(
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
                ))
                .left_join(groups::table.on(
                    groups::uuid.eq(groups_users::groups_uuid)
                ))
                .left_join(collections_groups::table.on(
                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
                        collections_groups::collections_uuid.eq(collections::uuid)
                    )
                ))
                .filter(collections::uuid.eq(&self.uuid))
                .filter(
                    users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or( // Directly accessed collection
                    users_organizations::access_all.eq(true).or( // access_all in Organization
                    users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
                    )).or(
                    groups::access_all.eq(true) // access_all in groups
                    ).or( // access via groups
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
                    collections_groups::collections_uuid.is_not_null().and(
                    collections_groups::hide_passwords.eq(true))
                    )
                    )
                )
                .count()
                .first::<i64>(conn)
                .ok()
                .unwrap_or(0) != 0
        }}
    }
}
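Both rewritten checks collapse the old two-step lookup (org membership first, then a per-collection row) into a single query whose filter ORs together every access path: a direct users_collections row, org-wide access_all, admin/owner rank, and group-based access. The trailing count-and-compare is effectively an EXISTS test; schematically:

    // Shape of the generated query (schematic, not compilable):
    // SELECT COUNT(*) FROM collections
    //   LEFT JOIN users_collections / users_organizations / groups_users / groups / collections_groups ...
    //   WHERE collections.uuid = ?
    //     AND (direct_access OR org_access_all OR admin_or_owner OR group_access)
    // A non-zero count means at least one access path matched.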
@@ -346,6 +407,19 @@ impl CollectionUser {
        }}
    }

    pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
                .filter(collections::org_uuid.eq(org_uuid))
                .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
                .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
                .load::<CollectionUserDb>(conn)
                .expect("Error loading users_collections")
                .from_db()
        }}
    }

    pub async fn save(
        user_uuid: &str,
        collection_uuid: &str,
@@ -429,6 +503,21 @@ impl CollectionUser {
        }}
    }

    pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid(
        collection_uuid: &str,
        conn: &mut DbConn,
    ) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::collection_uuid.eq(collection_uuid))
                .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
                .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
                .load::<CollectionUserDb>(conn)
                .expect("Error loading users_collections")
                .from_db()
        }}
    }

    pub async fn find_by_collection_and_user(
        collection_uuid: &str,
        user_uuid: &str,
@@ -87,9 +87,9 @@ pub enum EventType {
    OrganizationUserRemoved = 1503,
    OrganizationUserUpdatedGroups = 1504,
    // OrganizationUserUnlinkedSso = 1505, // Not supported
    // OrganizationUserResetPasswordEnroll = 1506, // Not supported
    // OrganizationUserResetPasswordWithdraw = 1507, // Not supported
    // OrganizationUserAdminResetPassword = 1508, // Not supported
    OrganizationUserResetPasswordEnroll = 1506,
    OrganizationUserResetPasswordWithdraw = 1507,
    OrganizationUserAdminResetPassword = 1508,
    // OrganizationUserResetSsoLink = 1509, // Not supported
    // OrganizationUserFirstSsoLogin = 1510, // Not supported
    OrganizationUserRevoked = 1511,
@@ -263,6 +263,17 @@ impl Event {
        }}
    }

    pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! { conn: {
            event::table
                .filter(event::org_uuid.eq(org_uuid))
                .count()
                .first::<i64>(conn)
                .ok()
                .unwrap_or(0)
        }}
    }

    pub async fn find_by_org_and_user_org(
        org_uuid: &str,
        user_org_uuid: &str,
@@ -64,7 +64,32 @@ impl Group {
            "AccessAll": self.access_all,
            "ExternalId": self.external_id,
            "CreationDate": format_date(&self.creation_date),
            "RevisionDate": format_date(&self.revision_date)
            "RevisionDate": format_date(&self.revision_date),
            "Object": "group"
        })
    }

    pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
        let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
            .await
            .iter()
            .map(|entry| {
                json!({
                    "Id": entry.collections_uuid,
                    "ReadOnly": entry.read_only,
                    "HidePasswords": entry.hide_passwords
                })
            })
            .collect();

        json!({
            "Id": self.uuid,
            "OrganizationId": self.organizations_uuid,
            "Name": self.name,
            "AccessAll": self.access_all,
            "ExternalId": self.external_id,
            "Collections": collections_groups,
            "Object": "groupDetails"
        })
    }

@@ -151,6 +176,13 @@ impl Group {
        }
    }

    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        for group in Self::find_by_organization(org_uuid, conn).await {
            group.delete(conn).await?;
        }
        Ok(())
    }

    pub async fn find_by_organization(organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            groups::table
@@ -161,6 +193,17 @@ impl Group {
        }}
    }

    pub async fn count_by_org(organizations_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! { conn: {
            groups::table
                .filter(groups::organizations_uuid.eq(organizations_uuid))
                .count()
                .first::<i64>(conn)
                .ok()
                .unwrap_or(0)
        }}
    }

    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            groups::table
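Collection, Event and Group each gain an identical count_by_org helper; note the .ok().unwrap_or(0) tail means a failed query silently counts as zero rather than surfacing an error. A hypothetical call site aggregating per-organization totals (illustrative, not taken from the diff):

    // Illustrative only: gather counts for an organization overview.
    let collections = Collection::count_by_org(&org_uuid, &mut conn).await;
    let groups = Group::count_by_org(&org_uuid, &mut conn).await;
    let events = Event::count_by_org(&org_uuid, &mut conn).await;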
@@ -28,4 +28,4 @@ pub use self::organization::{Organization, UserOrgStatus, UserOrgType, UserOrgan
pub use self::send::{Send, SendType};
pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::two_factor_incomplete::TwoFactorIncomplete;
pub use self::user::{Invitation, User, UserStampException};
pub use self::user::{Invitation, User, UserKdfType, UserStampException};
@@ -32,7 +32,7 @@ pub enum OrgPolicyType {
    PersonalOwnership = 5,
    DisableSend = 6,
    SendOptions = 7,
    // ResetPassword = 8, // Not supported
    ResetPassword = 8,
    // MaximumVaultTimeout = 9, // Not supported (Not AGPLv3 Licensed)
    // DisablePersonalVaultExport = 10, // Not supported (Not AGPLv3 Licensed)
}
@@ -44,6 +44,13 @@ pub struct SendOptionsPolicyData {
    pub DisableHideEmail: bool,
}

// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct ResetPasswordDataModel {
    pub AutoEnrollEnabled: bool,
}

pub type OrgPolicyResult = Result<(), OrgPolicyErr>;

#[derive(Debug)]
@@ -298,6 +305,20 @@ impl OrgPolicy {
        Ok(())
    }

    pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
        match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
            Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
                Ok(opts) => {
                    return opts.data.AutoEnrollEnabled;
                }
                _ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
            },
            None => return false,
        }

        false
    }

    /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
    /// option of the `Send Options` policy, and the user is not an owner or admin of that org.
    pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool {
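The policy payload is whatever JSON the admin client stored; decoding it is a plain serde step. A simplified standalone sketch, leaving out vaultwarden's UpCase case-normalizing wrapper:

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[allow(non_snake_case)]
    struct ResetPasswordDataModel {
        AutoEnrollEnabled: bool,
    }

    /// Malformed or missing data counts as "auto-enroll disabled".
    fn auto_enroll_enabled(policy_data: &str) -> bool {
        serde_json::from_str::<ResetPasswordDataModel>(policy_data)
            .map(|opts| opts.AutoEnrollEnabled)
            .unwrap_or(false)
    }

    // auto_enroll_enabled(r#"{"AutoEnrollEnabled":true}"#) == true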
@@ -2,7 +2,7 @@ use num_traits::FromPrimitive;
use serde_json::Value;
use std::cmp::Ordering;

use super::{CollectionUser, GroupUser, OrgPolicy, OrgPolicyType, User};
use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
use crate::CONFIG;

db_object! {
@@ -29,6 +29,7 @@ db_object! {
        pub akey: String,
        pub status: i32,
        pub atype: i32,
        pub reset_password_key: Option<String>,
    }
}

@@ -158,7 +159,7 @@ impl Organization {
            "SelfHost": true,
            "UseApi": false, // Not supported
            "HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
            "UseResetPassword": false, // Not supported
            "UseResetPassword": CONFIG.mail_enabled(),

            "BusinessName": null,
            "BusinessAddress1": null,
@@ -194,6 +195,7 @@ impl UserOrganization {
            akey: String::new(),
            status: UserOrgStatus::Accepted as i32,
            atype: UserOrgType::User as i32,
            reset_password_key: None,
        }
    }

@@ -265,6 +267,7 @@ impl Organization {
        Collection::delete_all_by_organization(&self.uuid, conn).await?;
        UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
        OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
        Group::delete_all_by_organization(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@@ -311,7 +314,8 @@ impl UserOrganization {
            "UseApi": false, // Not supported
            "SelfHost": true,
            "HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
            "ResetPasswordEnrolled": false, // Not supported
            "ResetPasswordEnrolled": self.reset_password_key.is_some(),
            "UseResetPassword": CONFIG.mail_enabled(),
            "SsoBound": false, // Not supported
            "UseSso": false, // Not supported
            "ProviderId": null,
@@ -322,7 +326,7 @@ impl UserOrganization {
            // TODO: Add support for Custom User Roles
            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
            // "Permissions": {
            //     "AccessEventLogs": false, // Not supported
            //     "AccessEventLogs": false,
            //     "AccessImportExport": false,
            //     "AccessReports": false,
            //     "ManageAllCollections": false,
@@ -333,9 +337,9 @@ impl UserOrganization {
            //     "editAssignedCollections": false,
            //     "deleteAssignedCollections": false,
            //     "ManageCiphers": false,
            //     "ManageGroups": false, // Not supported
            //     "ManageGroups": false,
            //     "ManagePolicies": false,
            //     "ManageResetPassword": false, // Not supported
            //     "ManageResetPassword": false,
            //     "ManageSso": false, // Not supported
            //     "ManageUsers": false,
            //     "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
@@ -354,7 +358,12 @@ impl UserOrganization {
        })
    }

    pub async fn to_json_user_details(&self, conn: &mut DbConn) -> Value {
    pub async fn to_json_user_details(
        &self,
        include_collections: bool,
        include_groups: bool,
        conn: &mut DbConn,
    ) -> Value {
        let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();

        // Because Bitwarden wants the status to be -1 for revoked users we need to catch that here.
@@ -365,15 +374,45 @@ impl UserOrganization {
            self.status
        };

        let twofactor_enabled = !TwoFactor::find_by_user(&user.uuid, conn).await.is_empty();

        let groups: Vec<String> = if include_groups && CONFIG.org_groups_enabled() {
            GroupUser::find_by_user(&self.uuid, conn).await.iter().map(|gu| gu.groups_uuid.clone()).collect()
        } else {
            // The Bitwarden clients seem to call this API regardless of whether groups are enabled,
            // so just act as if there are no groups.
            Vec::with_capacity(0)
        };

        let collections: Vec<Value> = if include_collections {
            CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
                .await
                .iter()
                .map(|cu| {
                    json!({
                        "Id": cu.collection_uuid,
                        "ReadOnly": cu.read_only,
                        "HidePasswords": cu.hide_passwords,
                    })
                })
                .collect()
        } else {
            Vec::with_capacity(0)
        };

        json!({
            "Id": self.uuid,
            "UserId": self.user_uuid,
            "Name": user.name,
            "Email": user.email,
            "Groups": groups,
            "Collections": collections,

            "Status": status,
            "Type": self.atype,
            "AccessAll": self.access_all,
            "TwoFactorEnabled": twofactor_enabled,
            "ResetPasswordEnrolled": self.reset_password_key.is_some(),

            "Object": "organizationUserUserDetails",
        })
@@ -44,8 +44,12 @@ db_object! {

        pub client_kdf_type: i32,
        pub client_kdf_iter: i32,
        pub client_kdf_memory: Option<i32>,
        pub client_kdf_parallelism: Option<i32>,

        pub api_key: Option<String>,

        pub avatar_color: Option<String>,
    }

    #[derive(Identifiable, Queryable, Insertable)]
@@ -56,6 +60,11 @@ db_object! {
    }
}

pub enum UserKdfType {
    Pbkdf2 = 0,
    Argon2id = 1,
}

enum UserStatus {
    Enabled = 0,
    Invited = 1,
@@ -71,8 +80,8 @@ pub struct UserStampException {

/// Local methods
impl User {
    pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
    pub const CLIENT_KDF_ITER_DEFAULT: i32 = 100_000;
    pub const CLIENT_KDF_TYPE_DEFAULT: i32 = UserKdfType::Pbkdf2 as i32;
    pub const CLIENT_KDF_ITER_DEFAULT: i32 = 600_000;

    pub fn new(email: String) -> Self {
        let now = Utc::now().naive_utc();
@@ -111,8 +120,12 @@ impl User {

            client_kdf_type: Self::CLIENT_KDF_TYPE_DEFAULT,
            client_kdf_iter: Self::CLIENT_KDF_ITER_DEFAULT,
            client_kdf_memory: None,
            client_kdf_parallelism: None,

            api_key: None,

            avatar_color: None,
        }
    }

@@ -143,18 +156,31 @@ impl User {
    /// # Arguments
    ///
    /// * `password` - A str which contains a hashed version of the users master password.
    /// * `new_key` - A String which contains the new aKey value of the users master password.
    /// * `allow_next_route` - A Option<Vec<String>> with the function names of the next allowed (rocket) routes.
    ///     These routes are able to use the previous stamp id for the next 2 minutes.
    ///     After these 2 minutes this stamp will expire.
    ///
    pub fn set_password(&mut self, password: &str, allow_next_route: Option<Vec<String>>) {
    pub fn set_password(
        &mut self,
        password: &str,
        new_key: Option<String>,
        reset_security_stamp: bool,
        allow_next_route: Option<Vec<String>>,
    ) {
        self.password_hash = crypto::hash_password(password.as_bytes(), &self.salt, self.password_iterations as u32);

        if let Some(route) = allow_next_route {
            self.set_stamp_exception(route);
        }

        self.reset_security_stamp()
        if let Some(new_key) = new_key {
            self.akey = new_key;
        }

        if reset_security_stamp {
            self.reset_security_stamp()
        }
    }

    pub fn reset_security_stamp(&mut self) {
@@ -226,6 +252,7 @@ impl User {
            "Providers": [],
            "ProviderOrganizations": [],
            "ForcePasswordReset": false,
            "AvatarColor": self.avatar_color,
            "Object": "profile",
        })
    }
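A call-site sketch for the widened set_password signature (values illustrative): passing None for the key, true for the stamp and the old route allowance reproduces the previous behaviour, while the admin reset flow can now swap in a new protected key in the same call.

    // Previous behaviour: rehash the password, keep the akey, rotate the security stamp.
    user.set_password("hashed-master-password", None, true, None);

    // Admin password reset: also replace the protected key (illustrative variable).
    user.set_password("hashed-master-password", Some(new_protected_key), true, None);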
The same two hunks are applied to each of the three database schema files (MySQL, PostgreSQL and SQLite):

@@ -199,7 +199,10 @@ table! {
        excluded_globals -> Text,
        client_kdf_type -> Integer,
        client_kdf_iter -> Integer,
        client_kdf_memory -> Nullable<Integer>,
        client_kdf_parallelism -> Nullable<Integer>,
        api_key -> Nullable<Text>,
        avatar_color -> Nullable<Text>,
    }
}

@@ -221,6 +224,7 @@ table! {
        akey -> Text,
        status -> Integer,
        atype -> Integer,
        reset_password_key -> Nullable<Text>,
    }
}
src/mail.rs (111 changes)
@@ -8,7 +8,7 @@ use lettre::{
    transport::smtp::authentication::{Credentials, Mechanism as SmtpAuthMechanism},
    transport::smtp::client::{Tls, TlsParameters},
    transport::smtp::extension::ClientId,
    Address, AsyncSmtpTransport, AsyncTransport, Tokio1Executor,
    Address, AsyncSendmailTransport, AsyncSmtpTransport, AsyncTransport, Tokio1Executor,
};

use crate::{
@@ -21,7 +21,15 @@ use crate::{
    CONFIG,
};

fn mailer() -> AsyncSmtpTransport<Tokio1Executor> {
fn sendmail_transport() -> AsyncSendmailTransport<Tokio1Executor> {
    if let Some(command) = CONFIG.sendmail_command() {
        AsyncSendmailTransport::new_with_command(command)
    } else {
        AsyncSendmailTransport::new()
    }
}

fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> {
    use std::time::Duration;
    let host = CONFIG.smtp_host().unwrap();

@@ -88,7 +96,7 @@ fn mailer() -> AsyncSmtpTransport<Tokio1Executor> {
}

fn get_text(template_name: &'static str, data: serde_json::Value) -> Result<(String, String, String), Error> {
    let (subject_html, body_html) = get_template(&format!("{}.html", template_name), &data)?;
    let (subject_html, body_html) = get_template(&format!("{template_name}.html"), &data)?;
    let (_subject_text, body_text) = get_template(template_name, &data)?;
    Ok((subject_html, body_html, body_text))
}
@@ -245,6 +253,7 @@ pub async fn send_invite(
            "org_id": org_id.as_deref().unwrap_or("_"),
            "org_user_id": org_user_id.as_deref().unwrap_or("_"),
            "email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
            "org_name_encoded": percent_encode(org_name.as_bytes(), NON_ALPHANUMERIC).to_string(),
            "org_name": org_name,
            "token": invite_token,
        }),
@@ -495,6 +504,71 @@ pub async fn send_test(address: &str) -> EmptyResult {
    send_email(address, &subject, body_html, body_text).await
}

pub async fn send_admin_reset_password(address: &str, user_name: &str, org_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/admin_reset_password",
        json!({
            "url": CONFIG.domain(),
            "img_src": CONFIG._smtp_img_src(),
            "user_name": user_name,
            "org_name": org_name,
        }),
    )?;
    send_email(address, &subject, body_html, body_text).await
}

async fn send_with_selected_transport(email: Message) -> EmptyResult {
    if CONFIG.use_sendmail() {
        match sendmail_transport().send(email).await {
            Ok(_) => Ok(()),
            // Match some common errors and make them more user friendly
            Err(e) => {
                if e.is_client() {
                    debug!("Sendmail client error: {:#?}", e);
                    err!(format!("Sendmail client error: {e}"));
                } else if e.is_response() {
                    debug!("Sendmail response error: {:#?}", e);
                    err!(format!("Sendmail response error: {e}"));
                } else {
                    debug!("Sendmail error: {:#?}", e);
                    err!(format!("Sendmail error: {e}"));
                }
            }
        }
    } else {
        match smtp_transport().send(email).await {
            Ok(_) => Ok(()),
            // Match some common errors and make them more user friendly
            Err(e) => {
                if e.is_client() {
                    debug!("SMTP client error: {:#?}", e);
                    err!(format!("SMTP client error: {e}"));
                } else if e.is_transient() {
                    debug!("SMTP 4xx error: {:#?}", e);
                    err!(format!("SMTP 4xx error: {e}"));
                } else if e.is_permanent() {
                    debug!("SMTP 5xx error: {:#?}", e);
                    let mut msg = e.to_string();
                    // Add a special check for 535 to add a more descriptive message
                    if msg.contains("(535)") {
                        msg = format!("{msg} - Authentication credentials invalid");
                    }
                    err!(format!("SMTP 5xx error: {msg}"));
                } else if e.is_timeout() {
                    debug!("SMTP timeout error: {:#?}", e);
                    err!(format!("SMTP timeout error: {e}"));
                } else if e.is_tls() {
                    debug!("SMTP encryption error: {:#?}", e);
                    err!(format!("SMTP encryption error: {e}"));
                } else {
                    debug!("SMTP error: {:#?}", e);
                    err!(format!("SMTP error: {e}"));
                }
            }
        }
    }
}

async fn send_email(address: &str, subject: &str, body_html: String, body_text: String) -> EmptyResult {
    let smtp_from = &CONFIG.smtp_from();

@@ -524,34 +598,5 @@ async fn send_email(address: &str, subject: &str, body_html: String, body_text:
        .subject(subject)
        .multipart(body)?;

    match mailer().send(email).await {
        Ok(_) => Ok(()),
        // Match some common errors and make them more user friendly
        Err(e) => {
            if e.is_client() {
                debug!("SMTP Client error: {:#?}", e);
                err!(format!("SMTP Client error: {}", e));
            } else if e.is_transient() {
                debug!("SMTP 4xx error: {:#?}", e);
                err!(format!("SMTP 4xx error: {}", e));
            } else if e.is_permanent() {
                debug!("SMTP 5xx error: {:#?}", e);
                let mut msg = e.to_string();
                // Add a special check for 535 to add a more descriptive message
                if msg.contains("(535)") {
                    msg = format!("{} - Authentication credentials invalid", msg);
                }
                err!(format!("SMTP 5xx error: {}", msg));
            } else if e.is_timeout() {
                debug!("SMTP timeout error: {:#?}", e);
                err!(format!("SMTP timeout error: {}", e));
            } else if e.is_tls() {
                debug!("SMTP Encryption error: {:#?}", e);
                err!(format!("SMTP Encryption error: {}", e));
            } else {
                debug!("SMTP {:#?}", e);
                err!(format!("SMTP {}", e));
            }
        }
    }
    send_with_selected_transport(email).await
}
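Both transports implement lettre's AsyncTransport trait, which is what makes the runtime switch a single if on the config. A minimal standalone sketch of the sendmail path (message construction elided; the function name is illustrative):

    use lettre::{AsyncSendmailTransport, AsyncTransport, Message, Tokio1Executor};

    async fn send_via_sendmail(email: Message) -> Result<(), lettre::transport::sendmail::Error> {
        // Uses the `sendmail` binary from $PATH; new_with_command() overrides the binary.
        let transport = AsyncSendmailTransport::<Tokio1Executor>::new();
        transport.send(email).await.map(|_| ())
    }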
src/main.rs (151 changes)
@@ -34,7 +34,7 @@
// The more key/value pairs there are the more recursion occurs.
// We want to keep this as low as possible, but not higher than 128.
// If you go above 128 it will cause rust-analyzer to fail,
#![recursion_limit = "94"]
#![recursion_limit = "103"]

// When enabled use MiMalloc as malloc instead of the default malloc
#[cfg(feature = "enable_mimalloc")]
@@ -111,21 +111,29 @@ async fn main() -> Result<(), Error> {
    create_dir(&CONFIG.attachments_folder(), "attachments folder");

    let pool = create_db_pool().await;
    schedule_jobs(pool.clone()).await;
    schedule_jobs(pool.clone());
    crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&mut pool.get().await.unwrap()).await.unwrap();

    launch_rocket(pool, extra_debug).await // Blocks until program termination.
}

const HELP: &str = "\
Alternative implementation of the Bitwarden server API written in Rust

USAGE:
    vaultwarden
    vaultwarden [FLAGS|COMMAND]

COMMAND:
    hash [--preset {bitwarden|owasp}] Generate an Argon2id PHC ADMIN_TOKEN

PRESETS: m= t= p=
    bitwarden (default) 64MiB, 3 Iterations, 4 Threads
    owasp 19MiB, 2 Iterations, 1 Thread

FLAGS:
    -h, --help Prints help information
    -v, --version Prints the app version
";

pub const VERSION: Option<&str> = option_env!("VW_VERSION");
@@ -135,31 +143,95 @@ fn parse_args() {
    let version = VERSION.unwrap_or("(Version info from Git not present)");

    if pargs.contains(["-h", "--help"]) {
        println!("vaultwarden {}", version);
        print!("{}", HELP);
        println!("vaultwarden {version}");
        print!("{HELP}");
        exit(0);
    } else if pargs.contains(["-v", "--version"]) {
        println!("vaultwarden {}", version);
        println!("vaultwarden {version}");
        exit(0);
    }

    if let Some(command) = pargs.subcommand().unwrap_or_default() {
        if command == "hash" {
            use argon2::{
                password_hash::SaltString, Algorithm::Argon2id, Argon2, ParamsBuilder, PasswordHasher, Version::V0x13,
            };

            let mut argon2_params = ParamsBuilder::new();
            let preset: Option<String> = pargs.opt_value_from_str(["-p", "--preset"]).unwrap_or_default();
            let selected_preset;
            match preset.as_deref() {
                Some("owasp") => {
                    selected_preset = "owasp";
                    argon2_params.m_cost(19456);
                    argon2_params.t_cost(2);
                    argon2_params.p_cost(1);
                }
                _ => {
                    // Bitwarden preset is the default
                    selected_preset = "bitwarden";
                    argon2_params.m_cost(65540);
                    argon2_params.t_cost(3);
                    argon2_params.p_cost(4);
                }
            }

            println!("Generate an Argon2id PHC string using the '{selected_preset}' preset:\n");

            let password = rpassword::prompt_password("Password: ").unwrap();
            if password.len() < 8 {
                println!("\nPassword must contain at least 8 characters");
                exit(1);
            }

            let password_verify = rpassword::prompt_password("Confirm Password: ").unwrap();
            if password != password_verify {
                println!("\nPasswords do not match");
                exit(1);
            }

            let argon2 = Argon2::new(Argon2id, V0x13, argon2_params.build().unwrap());
            let salt = SaltString::encode_b64(&crate::crypto::get_random_bytes::<32>()).unwrap();

            let argon2_timer = tokio::time::Instant::now();
            if let Ok(password_hash) = argon2.hash_password(password.as_bytes(), &salt) {
                println!(
                    "\n\
                    ADMIN_TOKEN='{password_hash}'\n\n\
                    Generation of the Argon2id PHC string took: {:?}",
                    argon2_timer.elapsed()
                );
            } else {
                error!("Unable to generate Argon2id PHC hash.");
                exit(1);
            }
        }
        exit(0);
    }
}

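Typical use of the new subcommand (abridged transcript; the PHC string is elided): run it once, then copy the printed line into the environment or config file. Single-quote the value in shells, since PHC strings contain `$` characters.

    $ vaultwarden hash --preset owasp
    Generate an Argon2id PHC string using the 'owasp' preset:

    Password:
    Confirm Password:

    ADMIN_TOKEN='$argon2id$v=19$m=19456,t=2,p=1$...$...'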
fn launch_info() {
    println!("/--------------------------------------------------------------------\\");
    println!("| Starting Vaultwarden |");
    println!(
        "\
        /--------------------------------------------------------------------\\\n\
        | Starting Vaultwarden |"
    );

    if let Some(version) = VERSION {
        println!("|{:^68}|", format!("Version {}", version));
        println!("|{:^68}|", format!("Version {version}"));
    }

    println!("|--------------------------------------------------------------------|");
    println!("| This is an *unofficial* Bitwarden implementation, DO NOT use the |");
    println!("| official channels to report bugs/features, regardless of client. |");
    println!("| Send usage/configuration questions or feature requests to: |");
    println!("| https://vaultwarden.discourse.group/ |");
    println!("| Report suspected bugs/issues in the software itself at: |");
    println!("| https://github.com/dani-garcia/vaultwarden/issues/new |");
    println!("\\--------------------------------------------------------------------/\n");
    println!(
        "\
        |--------------------------------------------------------------------|\n\
        | This is an *unofficial* Bitwarden implementation, DO NOT use the |\n\
        | official channels to report bugs/features, regardless of client. |\n\
        | Send usage/configuration questions or feature requests to: |\n\
        | https://github.com/dani-garcia/vaultwarden/discussions or |\n\
        | https://vaultwarden.discourse.group/ |\n\
        | Report suspected bugs/issues in the software itself at: |\n\
        | https://github.com/dani-garcia/vaultwarden/issues/new |\n\
        \\--------------------------------------------------------------------/\n"
    );
}

fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
@@ -178,6 +250,14 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        log::LevelFilter::Off
    };

    // Only show rocket underscore `_` logs when the level is Debug or higher
    // Else this will bloat the log output with useless messages.
    let rocket_underscore_level = if level >= log::LevelFilter::Debug {
        log::LevelFilter::Warn
    } else {
        log::LevelFilter::Off
    };

    let mut logger = fern::Dispatch::new()
        .level(level)
        // Hide unknown certificate errors if using self-signed
@@ -185,7 +265,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        // Hide failed to close stream messages
        .level_for("hyper::server", log::LevelFilter::Warn)
        // Silence rocket logs
        .level_for("_", log::LevelFilter::Warn)
        .level_for("_", rocket_underscore_level)
        .level_for("rocket::launch", log::LevelFilter::Error)
        .level_for("rocket::launch_", log::LevelFilter::Error)
        .level_for("rocket::rocket", log::LevelFilter::Warn)
@@ -197,7 +277,8 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        // Prevent cookie_store logs
        .level_for("cookie_store", log::LevelFilter::Off)
        // Variable level for trust-dns used by reqwest
        .level_for("trust_dns_proto", trust_dns_level)
        .level_for("trust_dns_resolver::name_server::name_server", trust_dns_level)
        .level_for("trust_dns_proto::xfer", trust_dns_level)
        .level_for("diesel_logger", diesel_logger_level)
        .chain(std::io::stdout());

@@ -205,9 +286,9 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
    // This can contain sensitive information we do not want in the default debug/trace logging.
    if CONFIG.smtp_debug() {
        println!(
            "[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!"
            "[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!\n\
            [WARNING] Only enable SMTP_DEBUG during troubleshooting!\n"
        );
        println!("[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n");
        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
    } else {
        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Off)
@@ -224,7 +305,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        ))
    });
    } else {
        logger = logger.format(|out, message, _| out.finish(format_args!("{}", message)));
        logger = logger.format(|out, message, _| out.finish(format_args!("{message}")));
    }

    if let Some(log_file) = CONFIG.log_file() {
@@ -253,12 +334,12 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        },
    };

    let backtrace = backtrace::Backtrace::new();
    let backtrace = std::backtrace::Backtrace::force_capture();

    match info.location() {
        Some(location) => {
            error!(
                target: "panic", "thread '{}' panicked at '{}': {}:{}\n{:?}",
                target: "panic", "thread '{}' panicked at '{}': {}:{}\n{:}",
                thread,
                msg,
                location.file(),
@@ -268,7 +349,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        }
        None => error!(
            target: "panic",
            "thread '{}' panicked at '{}'\n{:?}",
            "thread '{}' panicked at '{}'\n{:}",
            thread,
            msg,
            backtrace
@@ -299,7 +380,7 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {

fn create_dir(path: &str, description: &str) {
    // Try to create the specified dir, if it doesn't already exist.
    let err_msg = format!("Error creating {} directory '{}'", description, path);
    let err_msg = format!("Error creating {description} directory '{path}'");
    create_dir_all(path).expect(&err_msg);
}

@@ -467,7 +548,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
    Ok(())
}

async fn schedule_jobs(pool: db::DbPool) {
fn schedule_jobs(pool: db::DbPool) {
    if CONFIG.job_poll_interval_ms() == 0 {
        info!("Job scheduler disabled.");
        return;
@@ -543,9 +624,7 @@ fn schedule_jobs(pool: db::DbPool) {
        // tick, the one that was added earlier will run first.
        loop {
            sched.tick();
            runtime.block_on(async move {
                tokio::time::sleep(tokio::time::Duration::from_millis(CONFIG.job_poll_interval_ms())).await
            });
            runtime.block_on(tokio::time::sleep(tokio::time::Duration::from_millis(CONFIG.job_poll_interval_ms())));
        }
    })
    .expect("Error spawning job scheduler thread");
src/static/scripts/404.css (vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
body {
    padding-top: 75px;
}
.vaultwarden-icon {
    width: 48px;
    height: 48px;
    height: 32px;
    width: auto;
    margin: -5px 0 0 0;
}
.footer {
    padding: 40px 0 40px 0;
    border-top: 1px solid #dee2e6;
}
.container {
    max-width: 980px;
}
.content {
    padding-top: 20px;
    padding-bottom: 20px;
    padding-left: 15px;
    padding-right: 15px;
}
.vw-404 {
    max-width: 500px; width: 100%;
}
src/static/scripts/admin.css (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
body {
    padding-top: 75px;
}
img {
    width: 48px;
    height: 48px;
}
.vaultwarden-icon {
    height: 32px;
    width: auto;
    margin: -5px 0 0 0;
}
/* Special alert-row class to use Bootstrap v5.2+ variable colors */
.alert-row {
    --bs-alert-border: 1px solid var(--bs-alert-border-color);
    color: var(--bs-alert-color);
    background-color: var(--bs-alert-bg);
    border: var(--bs-alert-border);
}

#users-table .vw-account-details {
    min-width: 250px;
}
#users-table .vw-created-at, #users-table .vw-last-active {
    min-width: 85px;
    max-width: 85px;
}
#users-table .vw-ciphers, #orgs-table .vw-users, #orgs-table .vw-ciphers {
    min-width: 35px;
    max-width: 40px;
}
#orgs-table .vw-misc {
    min-width: 65px;
    max-width: 80px;
}
#users-table .vw-attachments, #orgs-table .vw-attachments {
    min-width: 100px;
    max-width: 130px;
}
#users-table .vw-actions, #orgs-table .vw-actions {
    min-width: 130px;
    max-width: 130px;
}
#users-table .vw-org-cell {
    max-height: 120px;
}
#orgs-table .vw-org-details {
    min-width: 285px;
}

#support-string {
    height: 16rem;
}
.vw-copy-toast {
    width: 15rem;
}
src/static/scripts/admin.js (new, vendored, 77 lines)
@@ -0,0 +1,77 @@
+"use strict";
+/* eslint-env es2017, browser */
+/* exported BASE_URL, _post */
+
+function getBaseUrl() {
+    // If the base URL is `https://vaultwarden.example.com/base/path/`,
+    // `window.location.href` should have one of the following forms:
+    //
+    // - `https://vaultwarden.example.com/base/path/`
+    // - `https://vaultwarden.example.com/base/path/#/some/route[?queryParam=...]`
+    //
+    // We want to get to just `https://vaultwarden.example.com/base/path`.
+    const baseUrl = window.location.href;
+    const adminPos = baseUrl.indexOf("/admin");
+    return baseUrl.substring(0, adminPos != -1 ? adminPos : baseUrl.length);
+}
+const BASE_URL = getBaseUrl();
+
+function reload() {
+    // Reload the page by setting the exact same href
+    // Using window.location.reload() could cause a repost.
+    window.location = window.location.href;
+}
+
+function msg(text, reload_page = true) {
+    text && alert(text);
+    reload_page && reload();
+}
+
+function _post(url, successMsg, errMsg, body, reload_page = true) {
+    let respStatus;
+    let respStatusText;
+    fetch(url, {
+        method: "POST",
+        body: body,
+        mode: "same-origin",
+        credentials: "same-origin",
+        headers: { "Content-Type": "application/json" }
+    }).then( resp => {
+        if (resp.ok) {
+            msg(successMsg, reload_page);
+            // Abuse the catch handler by setting error to false and continue
+            return Promise.reject({error: false});
+        }
+        respStatus = resp.status;
+        respStatusText = resp.statusText;
+        return resp.text();
+    }).then( respText => {
+        try {
+            const respJson = JSON.parse(respText);
+            if (respJson.ErrorModel && respJson.ErrorModel.Message) {
+                return respJson.ErrorModel.Message;
+            } else {
+                return Promise.reject({body:`${respStatus} - ${respStatusText}\n\nUnknown error`, error: true});
+            }
+        } catch (e) {
+            return Promise.reject({body:`${respStatus} - ${respStatusText}\n\n[Catch] ${e}`, error: true});
+        }
+    }).then( apiMsg => {
+        msg(`${errMsg}\n${apiMsg}`, reload_page);
+    }).catch( e => {
+        if (e.error === false) { return true; }
+        else { msg(`${errMsg}\n${e.body}`, reload_page); }
+    });
+}
+
+// onLoad events
+document.addEventListener("DOMContentLoaded", (/*event*/) => {
+    // get current URL path and assign "active" class to the correct nav-item
+    const pathname = window.location.pathname;
+    if (pathname === "") return;
+    const navItem = document.querySelectorAll(`.navbar-nav .nav-item a[href="${pathname}"]`);
+    if (navItem.length === 1) {
+        navItem[0].className = navItem[0].className + " active";
+        navItem[0].setAttribute("aria-current", "page");
+    }
+});
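For orientation, here is a minimal usage sketch of the `_post()` helper defined above, in the style the other admin_* scripts call it; the endpoint and payload are illustrative only, not part of this file:

    // Hypothetical call mirroring admin_users.js: on a 2xx response the
    // success string is alert()ed via msg(); on failure the error string is
    // prefixed to the parsed ErrorModel message or to the HTTP status text.
    _post(`${BASE_URL}/admin/invite/`,
        "User invited correctly",                        // success message
        "Error inviting user",                           // error message prefix
        JSON.stringify({ "email": "user@example.com" }), // request body
        false                                            // skip the page reload
    );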
src/static/scripts/admin_diagnostics.js (new, vendored, 241 lines)
@@ -0,0 +1,241 @@
+"use strict";
+/* eslint-env es2017, browser */
+/* global BASE_URL:readable, BSN:readable */
+
+var dnsCheck = false;
+var timeCheck = false;
+var ntpTimeCheck = false;
+var domainCheck = false;
+var httpsCheck = false;
+
+// ================================
+// Date & Time Check
+const d = new Date();
+const year = d.getUTCFullYear();
+const month = String(d.getUTCMonth()+1).padStart(2, "0");
+const day = String(d.getUTCDate()).padStart(2, "0");
+const hour = String(d.getUTCHours()).padStart(2, "0");
+const minute = String(d.getUTCMinutes()).padStart(2, "0");
+const seconds = String(d.getUTCSeconds()).padStart(2, "0");
+const browserUTC = `${year}-${month}-${day} ${hour}:${minute}:${seconds} UTC`;
+
+// ================================
+// Check if the output is a valid IP
+const isValidIp = value => (/^(?:(?:^|\.)(?:2(?:5[0-5]|[0-4]\d)|1?\d?\d)){4}$/.test(value) ? true : false);
+
+function checkVersions(platform, installed, latest, commit=null) {
+    if (installed === "-" || latest === "-") {
+        document.getElementById(`${platform}-failed`).classList.remove("d-none");
+        return;
+    }
+
+    // Only check basic versions, no commit revisions
+    if (commit === null || installed.indexOf("-") === -1) {
+        if (installed !== latest) {
+            document.getElementById(`${platform}-warning`).classList.remove("d-none");
+        } else {
+            document.getElementById(`${platform}-success`).classList.remove("d-none");
+        }
+    } else {
+        // Check if this is a branched version.
+        const branchRegex = /(?:\s)\((.*?)\)/;
+        const branchMatch = installed.match(branchRegex);
+        if (branchMatch !== null) {
+            document.getElementById(`${platform}-branch`).classList.remove("d-none");
+        }
+
+        // This will remove branch info and check if there is a commit hash
+        const installedRegex = /(\d+\.\d+\.\d+)-(\w+)/;
+        const instMatch = installed.match(installedRegex);
+
+        // It could be that a new tagged version has the same commit hash.
+        // In this case the version is the same but only the number is different
+        if (instMatch !== null) {
+            if (instMatch[2] === commit) {
+                // The commit hashes are the same, so latest version is installed
+                document.getElementById(`${platform}-success`).classList.remove("d-none");
+                return;
+            }
+        }
+
+        if (installed === latest) {
+            document.getElementById(`${platform}-success`).classList.remove("d-none");
+        } else {
+            document.getElementById(`${platform}-warning`).classList.remove("d-none");
+        }
+    }
+}
+
+// ================================
+// Generate support string to be pasted on github or the forum
+async function generateSupportString(event, dj) {
+    event.preventDefault();
+    event.stopPropagation();
+
+    let supportString = "### Your environment (Generated via diagnostics page)\n";
+
+    supportString += `* Vaultwarden version: v${dj.current_release}\n`;
+    supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
+    supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
+    supportString += `* Running within Docker: ${dj.running_within_docker} (Base: ${dj.docker_base_image})\n`;
+    supportString += "* Environment settings overridden: ";
+    if (dj.overrides != "") {
+        supportString += "true\n";
+    } else {
+        supportString += "false\n";
+    }
+    supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`;
+    if (dj.ip_header_exists) {
+        supportString += `* IP Header check: ${dj.ip_header_match} (${dj.ip_header_name})\n`;
+    }
+    supportString += `* Internet access: ${dj.has_http_access}\n`;
+    supportString += `* Internet access via a proxy: ${dj.uses_proxy}\n`;
+    supportString += `* DNS Check: ${dnsCheck}\n`;
+    supportString += `* Browser/Server Time Check: ${timeCheck}\n`;
+    supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`;
+    supportString += `* Domain Configuration Check: ${domainCheck}\n`;
+    supportString += `* HTTPS Check: ${httpsCheck}\n`;
+    supportString += `* Database type: ${dj.db_type}\n`;
+    supportString += `* Database version: ${dj.db_version}\n`;
+    supportString += "* Clients used: \n";
+    supportString += "* Reverse proxy and version: \n";
+    supportString += "* Other relevant information: \n";
+
+    const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, {
+        "headers": { "Accept": "application/json" }
+    });
+    if (!jsonResponse.ok) {
+        alert("Generation failed: " + jsonResponse.statusText);
+        throw new Error(jsonResponse);
+    }
+    const configJson = await jsonResponse.json();
+    supportString += "\n### Config (Generated via diagnostics page)\n<details><summary>Show Running Config</summary>\n";
+    supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
+    supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n</details>\n";
+
+    document.getElementById("support-string").innerText = supportString;
+    document.getElementById("support-string").classList.remove("d-none");
+    document.getElementById("copy-support").classList.remove("d-none");
+}
+
+function copyToClipboard(event) {
+    event.preventDefault();
+    event.stopPropagation();
+
+    const supportStr = document.getElementById("support-string").innerText;
+    const tmpCopyEl = document.createElement("textarea");
+
+    tmpCopyEl.setAttribute("id", "copy-support-string");
+    tmpCopyEl.setAttribute("readonly", "");
+    tmpCopyEl.value = supportStr;
+    tmpCopyEl.style.position = "absolute";
+    tmpCopyEl.style.left = "-9999px";
+    document.body.appendChild(tmpCopyEl);
+    tmpCopyEl.select();
+    document.execCommand("copy");
+    tmpCopyEl.remove();
+
+    new BSN.Toast("#toastClipboardCopy").show();
+}
+
+function checkTimeDrift(utcTimeA, utcTimeB, statusPrefix) {
+    const timeDrift = (
+        Date.parse(utcTimeA.replace(" ", "T").replace(" UTC", "")) -
+        Date.parse(utcTimeB.replace(" ", "T").replace(" UTC", ""))
+    ) / 1000;
+    if (timeDrift > 15 || timeDrift < -15) {
+        document.getElementById(`${statusPrefix}-warning`).classList.remove("d-none");
+        return false;
+    } else {
+        document.getElementById(`${statusPrefix}-success`).classList.remove("d-none");
+        return true;
+    }
+}
+
+function checkDomain(browserURL, serverURL) {
+    if (serverURL == browserURL) {
+        document.getElementById("domain-success").classList.remove("d-none");
+        domainCheck = true;
+    } else {
+        document.getElementById("domain-warning").classList.remove("d-none");
+    }
+
+    // Check for HTTPS at domain-server-string
+    if (serverURL.startsWith("https://") ) {
+        document.getElementById("https-success").classList.remove("d-none");
+        httpsCheck = true;
+    } else {
+        document.getElementById("https-warning").classList.remove("d-none");
+    }
+}
+
+function initVersionCheck(dj) {
+    const serverInstalled = dj.current_release;
+    const serverLatest = dj.latest_release;
+    const serverLatestCommit = dj.latest_commit;
+
+    if (serverInstalled.indexOf("-") !== -1 && serverLatest !== "-" && serverLatestCommit !== "-") {
+        document.getElementById("server-latest-commit").classList.remove("d-none");
+    }
+    checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);
+
+    if (!dj.running_within_docker) {
+        const webInstalled = dj.web_vault_version;
+        const webLatest = dj.latest_web_build;
+        checkVersions("web", webInstalled, webLatest);
+    }
+}
+
+function checkDns(dns_resolved) {
+    if (isValidIp(dns_resolved)) {
+        document.getElementById("dns-success").classList.remove("d-none");
+        dnsCheck = true;
+    } else {
+        document.getElementById("dns-warning").classList.remove("d-none");
+    }
+}
+
+function init(dj) {
+    // Time check
+    document.getElementById("time-browser-string").innerText = browserUTC;
+
+    // Check if we were able to fetch a valid NTP Time
+    // If so, compare both browser and server with NTP
+    // Else, compare browser and server.
+    if (dj.ntp_time.indexOf("UTC") !== -1) {
+        timeCheck = checkTimeDrift(dj.server_time, browserUTC, "time");
+        checkTimeDrift(dj.ntp_time, browserUTC, "ntp-browser");
+        ntpTimeCheck = checkTimeDrift(dj.ntp_time, dj.server_time, "ntp-server");
+    } else {
+        timeCheck = checkTimeDrift(dj.server_time, browserUTC, "time");
+        ntpTimeCheck = "n/a";
+    }
+
+    // Domain check
+    const browserURL = location.href.toLowerCase();
+    document.getElementById("domain-browser-string").innerText = browserURL;
+    checkDomain(browserURL, dj.admin_url.toLowerCase());
+
+    // Version check
+    initVersionCheck(dj);
+
+    // DNS Check
+    checkDns(dj.dns_resolved);
+}
+
+// onLoad events
+document.addEventListener("DOMContentLoaded", (event) => {
+    const diag_json = JSON.parse(document.getElementById("diagnostics_json").innerText);
+    init(diag_json);
+
+    const btnGenSupport = document.getElementById("gen-support");
+    if (btnGenSupport) {
+        btnGenSupport.addEventListener("click", () => {
+            generateSupportString(event, diag_json);
+        });
+    }
+    const btnCopySupport = document.getElementById("copy-support");
+    if (btnCopySupport) {
+        btnCopySupport.addEventListener("click", copyToClipboard);
+    }
+});
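The ±15-second tolerance in checkTimeDrift() above is easy to reproduce in isolation; a small sketch with made-up timestamps in the same string shape the page produces:

    // Inputs follow the "YYYY-MM-DD HH:MM:SS UTC" format built by browserUTC.
    // The replace() calls make the strings Date.parse()-able; any drift beyond
    // 15 seconds in either direction flips the check to a warning.
    const serverTime = "2023-02-01 12:00:20 UTC";   // sample value
    const browserTime = "2023-02-01 12:00:00 UTC";  // sample value
    const drift = (
        Date.parse(serverTime.replace(" ", "T").replace(" UTC", "")) -
        Date.parse(browserTime.replace(" ", "T").replace(" UTC", ""))
    ) / 1000;
    console.log(drift, drift > 15 || drift < -15);  // 20 true -> warning shown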
src/static/scripts/admin_organizations.js (new, vendored, 70 lines)
@@ -0,0 +1,70 @@
+"use strict";
+/* eslint-env es2017, browser, jquery */
+/* global _post:readable, BASE_URL:readable, reload:readable, jdenticon:readable */
+
+function deleteOrganization(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const org_uuid = event.target.dataset.vwOrgUuid;
+    const org_name = event.target.dataset.vwOrgName;
+    const billing_email = event.target.dataset.vwBillingEmail;
+    if (!org_uuid) {
+        alert("Required parameters not found!");
+        return false;
+    }
+
+    // First make sure the user wants to delete this organization
+    const continueDelete = confirm(`WARNING: All data of this organization (${org_name}) will be lost!\nMake sure you have a backup, this cannot be undone!`);
+    if (continueDelete == true) {
+        const input_org_uuid = prompt(`To delete the organization "${org_name} (${billing_email})", please type the organization uuid below.`);
+        if (input_org_uuid != null) {
+            if (input_org_uuid == org_uuid) {
+                _post(`${BASE_URL}/admin/organizations/${org_uuid}/delete`,
+                    "Organization deleted correctly",
+                    "Error deleting organization"
+                );
+            } else {
+                alert("Wrong organization uuid, please try again");
+            }
+        }
+    }
+}
+
+function initActions() {
+    document.querySelectorAll("button[vw-delete-organization]").forEach(btn => {
+        btn.addEventListener("click", deleteOrganization);
+    });
+
+    if (jdenticon) {
+        jdenticon();
+    }
+}
+
+// onLoad events
+document.addEventListener("DOMContentLoaded", (/*event*/) => {
+    jQuery("#orgs-table").DataTable({
+        "drawCallback": function() {
+            initActions();
+        },
+        "stateSave": true,
+        "responsive": true,
+        "lengthMenu": [
+            [-1, 5, 10, 25, 50],
+            ["All", 5, 10, 25, 50]
+        ],
+        "pageLength": -1, // Default show all
+        "columnDefs": [{
+            "targets": [4,5],
+            "searchable": false,
+            "orderable": false
+        }]
+    });
+
+    // Add click events for organization actions
+    initActions();
+
+    const btnReload = document.getElementById("reload");
+    if (btnReload) {
+        btnReload.addEventListener("click", reload);
+    }
+});
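The initActions() wiring above expects buttons rendered by the organizations template; a sketch of the markup it assumes, with placeholder values (the attribute names come straight from the dataset reads in deleteOrganization()):

    // deleteOrganization() reads event.target.dataset.vwOrgUuid / vwOrgName /
    // vwBillingEmail, so the template button must carry these data attributes:
    const exampleMarkup = `
        <button vw-delete-organization
                data-vw-org-uuid="00000000-0000-0000-0000-000000000000"
                data-vw-org-name="Example Org"
                data-vw-billing-email="billing@example.com">Delete Organization</button>`;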
src/static/scripts/admin_settings.js (new, vendored, 232 lines)
@@ -0,0 +1,232 @@
+"use strict";
+/* eslint-env es2017, browser */
+/* global _post:readable, BASE_URL:readable */
+
+function smtpTest(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    if (formHasChanges(config_form)) {
+        alert("Config has been changed but not yet saved.\nPlease save the changes first before sending a test email.");
+        return false;
+    }
+
+    const test_email = document.getElementById("smtp-test-email");
+
+    // Do a very very basic email address check.
+    if (test_email.value.match(/\S+@\S+/i) === null) {
+        test_email.parentElement.classList.add("was-validated");
+        return false;
+    }
+
+    const data = JSON.stringify({ "email": test_email.value });
+    _post(`${BASE_URL}/admin/test/smtp/`,
+        "SMTP Test email sent correctly",
+        "Error sending SMTP test email",
+        data, false
+    );
+}
+
+function getFormData() {
+    let data = {};
+
+    document.querySelectorAll(".conf-checkbox").forEach(function (e) {
+        data[e.name] = e.checked;
+    });
+
+    document.querySelectorAll(".conf-number").forEach(function (e) {
+        data[e.name] = e.value ? +e.value : null;
+    });
+
+    document.querySelectorAll(".conf-text, .conf-password").forEach(function (e) {
+        data[e.name] = e.value || null;
+    });
+    return data;
+}
+
+function saveConfig(event) {
+    const data = JSON.stringify(getFormData());
+    _post(`${BASE_URL}/admin/config/`,
+        "Config saved correctly",
+        "Error saving config",
+        data
+    );
+    event.preventDefault();
+}
+
+function deleteConf(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const input = prompt(
+        "This will remove all user configurations, and restore the defaults and the " +
+        "values set by the environment. This operation could be dangerous. Type 'DELETE' to proceed:"
+    );
+    if (input === "DELETE") {
+        _post(`${BASE_URL}/admin/config/delete`,
+            "Config deleted correctly",
+            "Error deleting config"
+        );
+    } else {
+        alert("Wrong input, please try again");
+    }
+}
+
+function backupDatabase(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    _post(`${BASE_URL}/admin/config/backup_db`,
+        "Backup created successfully",
+        "Error creating backup", null, false
+    );
+}
+
+// Two functions to help check if there were changes to the form fields
+// Useful for example during the smtp test to prevent people from clicking save before testing their new settings
+function initChangeDetection(form) {
+    const ignore_fields = ["smtp-test-email"];
+    Array.from(form).forEach((el) => {
+        if (! ignore_fields.includes(el.id)) {
+            el.dataset.origValue = el.value;
+        }
+    });
+}
+
+function formHasChanges(form) {
+    return Array.from(form).some(el => "origValue" in el.dataset && ( el.dataset.origValue !== el.value));
+}
+
+// This function will prevent submitting a form when someone presses enter.
+function preventFormSubmitOnEnter(form) {
+    if (form) {
+        form.addEventListener("keypress", (event) => {
+            if (event.key == "Enter") {
+                event.preventDefault();
+            }
+        });
+    }
+}
+
+// This function will hook into the smtp-test-email input field and will call the smtpTest() function when enter is pressed.
+function submitTestEmailOnEnter() {
+    const smtp_test_email_input = document.getElementById("smtp-test-email");
+    if (smtp_test_email_input) {
+        smtp_test_email_input.addEventListener("keypress", (event) => {
+            if (event.key == "Enter") {
+                event.preventDefault();
+                smtpTest(event);
+            }
+        });
+    }
+}
+
+// Colorize some settings which are high risk
+function colorRiskSettings() {
+    const risk_items = document.getElementsByClassName("col-form-label");
+    Array.from(risk_items).forEach((el) => {
+        if (el.innerText.toLowerCase().includes("risks") ) {
+            el.parentElement.className += " alert-danger";
+        }
+    });
+}
+
+function toggleVis(event) {
+    event.preventDefault();
+    event.stopPropagation();
+
+    const elem = document.getElementById(event.target.dataset.vwPwToggle);
+    const type = elem.getAttribute("type");
+    if (type === "text") {
+        elem.setAttribute("type", "password");
+    } else {
+        elem.setAttribute("type", "text");
+    }
+}
+
+function masterCheck(check_id, inputs_query) {
+    function onChanged(checkbox, inputs_query) {
+        return function _fn() {
+            document.querySelectorAll(inputs_query).forEach(function (e) { e.disabled = !checkbox.checked; });
+            checkbox.disabled = false;
+        };
+    }
+
+    const checkbox = document.getElementById(check_id);
+    if (checkbox) {
+        const onChange = onChanged(checkbox, inputs_query);
+        onChange(); // Trigger the event initially
+        checkbox.addEventListener("change", onChange);
+    }
+}
+
+// This will check if the ADMIN_TOKEN is not an Argon2 hashed value.
+// Else it will show a warning, unless someone has closed it.
+// Then it will not show this warning for 30 days.
+function checkAdminToken() {
+    const admin_token = document.getElementById("input_admin_token");
+    const disable_admin_token = document.getElementById("input_disable_admin_token");
+    if (!disable_admin_token.checked && !admin_token.value.startsWith("$argon2")) {
+        // Check if the warning has been closed before and 30 days have passed
+        const admin_token_warning_closed = localStorage.getItem("admin_token_warning_closed");
+        if (admin_token_warning_closed !== null) {
+            const closed_date = new Date(parseInt(admin_token_warning_closed));
+            const current_date = new Date();
+            const thirtyDays = 1000*60*60*24*30;
+            if (current_date - closed_date < thirtyDays) {
+                return;
+            }
+        }
+
+        // When closing the alert, store the current date/time in the browser
+        const admin_token_warning = document.getElementById("admin_token_warning");
+        admin_token_warning.addEventListener("closed.bs.alert", function() {
+            const d = new Date();
+            localStorage.setItem("admin_token_warning_closed", d.getTime());
+        });
+
+        // Display the warning
+        admin_token_warning.classList.remove("d-none");
+    }
+}
+
+// This will check for specific configured values, and when needed will show a warning div
+function showWarnings() {
+    checkAdminToken();
+}
+
+const config_form = document.getElementById("config-form");
+
+// onLoad events
+document.addEventListener("DOMContentLoaded", (/*event*/) => {
+    initChangeDetection(config_form);
+    // Prevent enter to submitting the form and save the config.
+    // Users need to really click on save, this also to prevent accidental submits.
+    preventFormSubmitOnEnter(config_form);
+
+    submitTestEmailOnEnter();
+    colorRiskSettings();
+
+    document.querySelectorAll("input[id^='input__enable_']").forEach(group_toggle => {
+        const input_id = group_toggle.id.replace("input__enable_", "#g_");
+        masterCheck(group_toggle.id, `${input_id} input`);
+    });
+
+    document.querySelectorAll("button[data-vw-pw-toggle]").forEach(password_toggle_btn => {
+        password_toggle_btn.addEventListener("click", toggleVis);
+    });
+
+    const btnBackupDatabase = document.getElementById("backupDatabase");
+    if (btnBackupDatabase) {
+        btnBackupDatabase.addEventListener("click", backupDatabase);
+    }
+    const btnDeleteConf = document.getElementById("deleteConf");
+    if (btnDeleteConf) {
+        btnDeleteConf.addEventListener("click", deleteConf);
+    }
+    const btnSmtpTest = document.getElementById("smtpTest");
+    if (btnSmtpTest) {
+        btnSmtpTest.addEventListener("click", smtpTest);
+    }

+    config_form.addEventListener("submit", saveConfig);
+
+    showWarnings();
+});
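As a usage note, the change-detection pair above works as snapshot-then-compare; a minimal sketch, assuming the config-form element from the admin template:

    // initChangeDetection() stores each field's value in data-orig-value;
    // formHasChanges() later compares the live values against those snapshots,
    // which is how smtpTest() refuses to run while there are unsaved edits.
    const form = document.getElementById("config-form");
    initChangeDetection(form);
    // ...user edits a field...
    if (formHasChanges(form)) {
        alert("Config has been changed but not yet saved.");
    }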
src/static/scripts/admin_users.js (new, vendored, 279 lines)
@@ -0,0 +1,279 @@
+"use strict";
+/* eslint-env es2017, browser, jquery */
+/* global _post:readable, BASE_URL:readable, reload:readable, jdenticon:readable */
+
+function deleteUser(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const id = event.target.parentNode.dataset.vwUserUuid;
+    const email = event.target.parentNode.dataset.vwUserEmail;
+    if (!id || !email) {
+        alert("Required parameters not found!");
+        return false;
+    }
+    const input_email = prompt(`To delete user "${email}", please type the email below`);
+    if (input_email != null) {
+        if (input_email == email) {
+            _post(`${BASE_URL}/admin/users/${id}/delete`,
+                "User deleted correctly",
+                "Error deleting user"
+            );
+        } else {
+            alert("Wrong email, please try again");
+        }
+    }
+}
+
+function remove2fa(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const id = event.target.parentNode.dataset.vwUserUuid;
+    const email = event.target.parentNode.dataset.vwUserEmail;
+    if (!id || !email) {
+        alert("Required parameters not found!");
+        return false;
+    }
+    const confirmed = confirm(`Are you sure you want to remove 2FA for "${email}"?`);
+    if (confirmed) {
+        _post(`${BASE_URL}/admin/users/${id}/remove-2fa`,
+            "2FA removed correctly",
+            "Error removing 2FA"
+        );
+    }
+}
+
+function deauthUser(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const id = event.target.parentNode.dataset.vwUserUuid;
+    const email = event.target.parentNode.dataset.vwUserEmail;
+    if (!id || !email) {
+        alert("Required parameters not found!");
+        return false;
+    }
+    const confirmed = confirm(`Are you sure you want to deauthorize sessions for "${email}"?`);
+    if (confirmed) {
+        _post(`${BASE_URL}/admin/users/${id}/deauth`,
+            "Sessions deauthorized correctly",
+            "Error deauthorizing sessions"
+        );
+    }
+}
+
+function disableUser(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const id = event.target.parentNode.dataset.vwUserUuid;
+    const email = event.target.parentNode.dataset.vwUserEmail;
+    if (!id || !email) {
+        alert("Required parameters not found!");
+        return false;
+    }
+    const confirmed = confirm(`Are you sure you want to disable user "${email}"? This will also deauthorize their sessions.`);
+    if (confirmed) {
+        _post(`${BASE_URL}/admin/users/${id}/disable`,
+            "User disabled successfully",
+            "Error disabling user"
+        );
+    }
+}
+
+function enableUser(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const id = event.target.parentNode.dataset.vwUserUuid;
+    const email = event.target.parentNode.dataset.vwUserEmail;
+    if (!id || !email) {
+        alert("Required parameters not found!");
+        return false;
+    }
+    const confirmed = confirm(`Are you sure you want to enable user "${email}"?`);
+    if (confirmed) {
+        _post(`${BASE_URL}/admin/users/${id}/enable`,
+            "User enabled successfully",
+            "Error enabling user"
+        );
+    }
+}
+
+function updateRevisions(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    _post(`${BASE_URL}/admin/users/update_revision`,
+        "Success, clients will sync next time they connect",
+        "Error forcing clients to sync"
+    );
+}
+
+function inviteUser(event) {
+    event.preventDefault();
+    event.stopPropagation();
+    const email = document.getElementById("inviteEmail");
+    const data = JSON.stringify({
+        "email": email.value
+    });
+    email.value = "";
+    _post(`${BASE_URL}/admin/invite/`,
+        "User invited correctly",
+        "Error inviting user",
+        data
+    );
+}
+
+const ORG_TYPES = {
+    "0": {
+        "name": "Owner",
+        "color": "orange"
+    },
+    "1": {
+        "name": "Admin",
+        "color": "blueviolet"
+    },
+    "2": {
+        "name": "User",
+        "color": "blue"
+    },
+    "3": {
+        "name": "Manager",
+        "color": "green"
+    },
+};
+
+// Special sort function to sort dates in ISO format
+jQuery.extend(jQuery.fn.dataTableExt.oSort, {
+    "date-iso-pre": function(a) {
+        let x;
+        const sortDate = a.replace(/(<([^>]+)>)/gi, "").trim();
+        if (sortDate !== "") {
+            const dtParts = sortDate.split(" ");
+            const timeParts = (undefined != dtParts[1]) ? dtParts[1].split(":") : ["00", "00", "00"];
+            const dateParts = dtParts[0].split("-");
+            x = (dateParts[0] + dateParts[1] + dateParts[2] + timeParts[0] + timeParts[1] + ((undefined != timeParts[2]) ? timeParts[2] : 0)) * 1;
+            if (isNaN(x)) {
+                x = 0;
+            }
+        } else {
+            x = Infinity;
+        }
+        return x;
+    },
+
+    "date-iso-asc": function(a, b) {
+        return a - b;
+    },
+
+    "date-iso-desc": function(a, b) {
+        return b - a;
+    }
+});
+
+const userOrgTypeDialog = document.getElementById("userOrgTypeDialog");
+// Fill the form and title
+userOrgTypeDialog.addEventListener("show.bs.modal", function(event) {
+    // Get shared values
+    const userEmail = event.relatedTarget.parentNode.dataset.vwUserEmail;
+    const userUuid = event.relatedTarget.parentNode.dataset.vwUserUuid;
+    // Get org specific values
+    const userOrgType = event.relatedTarget.dataset.vwOrgType;
+    const userOrgTypeName = ORG_TYPES[userOrgType]["name"];
+    const orgName = event.relatedTarget.dataset.vwOrgName;
+    const orgUuid = event.relatedTarget.dataset.vwOrgUuid;
+
+    document.getElementById("userOrgTypeDialogTitle").innerHTML = `<b>Update User Type:</b><br><b>Organization:</b> ${orgName}<br><b>User:</b> ${userEmail}`;
+    document.getElementById("userOrgTypeUserUuid").value = userUuid;
+    document.getElementById("userOrgTypeOrgUuid").value = orgUuid;
+    document.getElementById(`userOrgType${userOrgTypeName}`).checked = true;
+}, false);
+
+// Prevent accidental submission of the form with valid elements after the modal has been hidden.
+userOrgTypeDialog.addEventListener("hide.bs.modal", function() {
+    document.getElementById("userOrgTypeDialogTitle").innerHTML = "";
+    document.getElementById("userOrgTypeUserUuid").value = "";
+    document.getElementById("userOrgTypeOrgUuid").value = "";
+}, false);
+
+function updateUserOrgType(event) {
+    event.preventDefault();
+    event.stopPropagation();
+
+    const data = JSON.stringify(Object.fromEntries(new FormData(event.target).entries()));
+
+    _post(`${BASE_URL}/admin/users/org_type`,
+        "Updated organization type of the user successfully",
+        "Error updating organization type of the user",
+        data
+    );
+}
+
+function initUserTable() {
+    // Color all the org buttons per type
+    document.querySelectorAll("button[data-vw-org-type]").forEach(function(e) {
+        const orgType = ORG_TYPES[e.dataset.vwOrgType];
+        e.style.backgroundColor = orgType.color;
+        e.title = orgType.name;
+    });
+
+    document.querySelectorAll("button[vw-remove2fa]").forEach(btn => {
+        btn.addEventListener("click", remove2fa);
+    });
+    document.querySelectorAll("button[vw-deauth-user]").forEach(btn => {
+        btn.addEventListener("click", deauthUser);
+    });
+    document.querySelectorAll("button[vw-delete-user]").forEach(btn => {
+        btn.addEventListener("click", deleteUser);
+    });
+    document.querySelectorAll("button[vw-disable-user]").forEach(btn => {
+        btn.addEventListener("click", disableUser);
+    });
+    document.querySelectorAll("button[vw-enable-user]").forEach(btn => {
+        btn.addEventListener("click", enableUser);
+    });
+
+    if (jdenticon) {
+        jdenticon();
+    }
+}
+
+// onLoad events
+document.addEventListener("DOMContentLoaded", (/*event*/) => {
+    jQuery("#users-table").DataTable({
+        "drawCallback": function() {
+            initUserTable();
+        },
+        "stateSave": true,
+        "responsive": true,
+        "lengthMenu": [
+            [-1, 2, 5, 10, 25, 50],
+            ["All", 2, 5, 10, 25, 50]
+        ],
+        "pageLength": -1, // Default show all
+        "columnDefs": [{
+            "targets": [1, 2],
+            "type": "date-iso"
+        }, {
+            "targets": 6,
+            "searchable": false,
+            "orderable": false
+        }]
+    });
+
+    // Add click events for user actions
+    initUserTable();
+
+    const btnUpdateRevisions = document.getElementById("updateRevisions");
+    if (btnUpdateRevisions) {
+        btnUpdateRevisions.addEventListener("click", updateRevisions);
+    }
+    const btnReload = document.getElementById("reload");
+    if (btnReload) {
+        btnReload.addEventListener("click", reload);
+    }
+    const btnUserOrgTypeForm = document.getElementById("userOrgTypeForm");
+    if (btnUserOrgTypeForm) {
+        btnUserOrgTypeForm.addEventListener("submit", updateUserOrgType);
+    }
+    const btnInviteUserForm = document.getElementById("inviteUserForm");
+    if (btnInviteUserForm) {
+        btnInviteUserForm.addEventListener("submit", inviteUser);
+    }
+});
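For reference, the custom "date-iso" ordering registered above is activated per column through columnDefs, exactly as the #users-table call does; a sketch against a hypothetical table id:

    // "date-iso" maps to the date-iso-pre / -asc / -desc handlers defined
    // above; empty cells sort as Infinity, i.e. after every real date.
    jQuery("#example-table").DataTable({
        "columnDefs": [{
            "targets": [1, 2],   // columns containing "YYYY-MM-DD HH:MM:SS"
            "type": "date-iso"
        }]
    });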
src/static/scripts/bootstrap.css (vendored, 2 lines changed)
@@ -10874,5 +10874,3 @@ textarea.form-control-lg {
     display: none !important;
   }
 }
-
-/*# sourceMappingURL=bootstrap.css.map */
src/static/scripts/datatables.css (vendored, 29 lines changed)
@@ -4,13 +4,19 @@
  *
  * To rebuild or modify this file with the latest versions of the included
  * software please visit:
- *   https://datatables.net/download/#bs5/dt-1.13.1
+ *   https://datatables.net/download/#bs5/dt-1.13.2
  *
  * Included libraries:
- *   DataTables 1.13.1
+ *   DataTables 1.13.2
  */

 @charset "UTF-8";
+:root {
+  --dt-row-selected: 13, 110, 253;
+  --dt-row-selected-text: 255, 255, 255;
+  --dt-row-selected-link: 9, 10, 11;
+}
+
 table.dataTable td.dt-control {
   text-align: center;
   cursor: pointer;
@@ -126,7 +132,7 @@ div.dataTables_processing > div:last-child > div {
   width: 13px;
   height: 13px;
   border-radius: 50%;
-  background: rgba(13, 110, 253, 0.9);
+  background: 13 110 253;
   animation-timing-function: cubic-bezier(0, 1, 1, 0);
 }
 div.dataTables_processing > div:last-child > div:nth-child(1) {
@@ -284,23 +290,28 @@ table.dataTable > tbody > tr {
   background-color: transparent;
 }
 table.dataTable > tbody > tr.selected > * {
-  box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.9);
-  color: white;
+  box-shadow: inset 0 0 0 9999px rgb(13, 110, 253);
+  box-shadow: inset 0 0 0 9999px rgb(var(--dt-row-selected));
+  color: rgb(255, 255, 255);
+  color: rgb(var(--dt-row-selected-text));
 }
 table.dataTable > tbody > tr.selected a {
-  color: #090a0b;
+  color: rgb(9, 10, 11);
+  color: rgb(var(--dt-row-selected-link));
 }
 table.dataTable.table-striped > tbody > tr.odd > * {
   box-shadow: inset 0 0 0 9999px rgba(0, 0, 0, 0.05);
 }
 table.dataTable.table-striped > tbody > tr.odd.selected > * {
-  box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.95);
+  box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.95);
 }
 table.dataTable.table-hover > tbody > tr:hover > * {
   box-shadow: inset 0 0 0 9999px rgba(0, 0, 0, 0.075);
 }
 table.dataTable.table-hover > tbody > tr.selected:hover > * {
-  box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.975);
+  box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.975);
 }

 div.dataTables_wrapper div.dataTables_length label {
@@ -374,9 +385,9 @@ div.dataTables_scrollFoot > .dataTables_scrollFootInner > table {

 @media screen and (max-width: 767px) {
   div.dataTables_wrapper div.dataTables_length,
-  div.dataTables_wrapper div.dataTables_filter,
-  div.dataTables_wrapper div.dataTables_info,
-  div.dataTables_wrapper div.dataTables_paginate {
+    div.dataTables_wrapper div.dataTables_filter,
+    div.dataTables_wrapper div.dataTables_info,
+    div.dataTables_wrapper div.dataTables_paginate {
     text-align: center;
   }
   div.dataTables_wrapper div.dataTables_paginate ul.pagination {
src/static/scripts/datatables.js (vendored, 64 lines changed)
@@ -4,20 +4,20 @@
  *
  * To rebuild or modify this file with the latest versions of the included
  * software please visit:
- *   https://datatables.net/download/#bs5/dt-1.13.1
+ *   https://datatables.net/download/#bs5/dt-1.13.2
  *
  * Included libraries:
- *   DataTables 1.13.1
+ *   DataTables 1.13.2
  */

-/*! DataTables 1.13.1
- * ©2008-2022 SpryMedia Ltd - datatables.net/license
+/*! DataTables 1.13.2
+ * ©2008-2023 SpryMedia Ltd - datatables.net/license
  */

 /**
  * @summary     DataTables
  * @description Paginate, search and order HTML tables
- * @version     1.13.1
+ * @version     1.13.2
  * @author      SpryMedia Ltd
  * @contact     www.datatables.net
  * @copyright   SpryMedia Ltd.
@@ -1382,7 +1382,12 @@


 	var _isNumber = function ( d, decimalPoint, formatted ) {
-		var strType = typeof d === 'string';
+		let type = typeof d;
+		var strType = type === 'string';
+
+		if ( type === 'number' || type === 'bigint') {
+			return true;
+		}

 		// If empty return immediately so there must be a number if it is a
 		// formatted string (this stops the string "k", or "kr", etc being detected
@@ -6789,8 +6794,15 @@

 		if ( eventName !== null ) {
 			var e = $.Event( eventName+'.dt' );
+			var table = $(settings.nTable);

-			$(settings.nTable).trigger( e, args );
+			table.trigger( e, args );
+
+			// If not yet attached to the document, trigger the event
+			// on the body directly to sort of simulate the bubble
+			if (table.parents('body').length === 0) {
+				$('body').trigger( e, args );
+			}

 			ret.push( e.result );
 		}
@@ -7256,7 +7268,7 @@

 	pluck: function ( prop )
 	{
-		let fn = DataTable.util.get(prop);
+		var fn = DataTable.util.get(prop);

 		return this.map( function ( el ) {
 			return fn(el);
@@ -8353,10 +8365,9 @@

 	$(document).on('plugin-init.dt', function (e, context) {
 		var api = new _Api( context );

-		const namespace = 'on-plugin-init';
-		const stateSaveParamsEvent = `stateSaveParams.${namespace}`;
-		const destroyEvent = `destroy.${namespace}`;
+		var namespace = 'on-plugin-init';
+		var stateSaveParamsEvent = 'stateSaveParams.' + namespace;
+		var destroyEvent = 'destroy. ' + namespace;

 		api.on( stateSaveParamsEvent, function ( e, settings, d ) {
 			// This could be more compact with the API, but it is a lot faster as a simple
@@ -8375,7 +8386,7 @@
 		});

 		api.on( destroyEvent, function () {
-			api.off(`${stateSaveParamsEvent} ${destroyEvent}`);
+			api.off(stateSaveParamsEvent + ' ' + destroyEvent);
 		});

 		var loaded = api.state.loaded();
@@ -9697,7 +9708,7 @@
 	 *  @type string
 	 *  @default Version number
 	 */
-	DataTable.version = "1.13.1";
+	DataTable.version = "1.13.2";

 	/**
 	 * Private data store, containing all of the settings objects that are
@@ -14121,7 +14132,7 @@
 		 *
 		 *  @type string
 		 */
-		build:"bs5/dt-1.13.1",
+		build:"bs5/dt-1.13.2",


 		/**
@@ -14830,10 +14841,17 @@
 			}

 			if ( btnDisplay !== null ) {
-				node = $('<a>', {
+				var tag = settings.oInit.pagingTag || 'a';
+				var disabled = btnClass.indexOf(disabledClass) !== -1;
+
+
+				node = $('<'+tag+'>', {
 						'class': classes.sPageButton+' '+btnClass,
 						'aria-controls': settings.sTableId,
+						'aria-disabled': disabled ? 'true' : null,
 						'aria-label': aria[ button ],
+						'aria-role': 'link',
+						'aria-current': btnClass === classes.sPageButtonActive ? 'page' : null,
 						'data-dt-idx': button,
 						'tabindex': tabIndex,
 						'id': idx === 0 && typeof button === 'string' ?
@@ -14965,6 +14983,12 @@
 		if ( d !== 0 && (!d || d === '-') ) {
 			return -Infinity;
 		}
+
+		let type = typeof d;
+
+		if (type === 'number' || type === 'bigint') {
+			return d;
+		}

 		// If a decimal place other than `.` is used, it needs to be given to the
 		// function so we can detect it and replace with a `.` which is the only
@@ -15647,7 +15671,6 @@
 			require('datatables.net')(root, $);
 		}

-
 		return factory( $, root, root.document );
 	};
 }
@@ -15755,6 +15778,8 @@ DataTable.ext.renderer.pageButton.bootstrap = function ( settings, host, idx, bu
 	}

 	if ( btnDisplay ) {
+		var disabled = btnClass.indexOf('disabled') !== -1;
+
 		node = $('<li>', {
 				'class': classes.sPageButton+' '+btnClass,
 				'id': idx === 0 && typeof button === 'string' ?
@@ -15762,9 +15787,12 @@ DataTable.ext.renderer.pageButton.bootstrap = function ( settings, host, idx, bu
 						null
 			} )
 			.append( $('<a>', {
-					'href': '#',
+					'href': disabled ? null : '#',
 					'aria-controls': settings.sTableId,
+					'aria-disabled': disabled ? 'true' : null,
 					'aria-label': aria[ button ],
+					'aria-role': 'link',
+					'aria-current': btnClass === 'active' ? 'page' : null,
 					'data-dt-idx': button,
 					'tabindex': settings.iTabIndex,
 					'class': 'page-link'
@@ -1,5 +1,5 @@
 /*!
- * jQuery JavaScript Library v3.6.2 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector
+ * jQuery JavaScript Library v3.6.3 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector
  * https://jquery.com/
  *
  * Includes Sizzle.js
@@ -9,7 +9,7 @@
  * Released under the MIT license
  * https://jquery.org/license
  *
- * Date: 2022-12-13T14:56Z
+ * Date: 2022-12-20T21:28Z
  */
 ( function( global, factory ) {

@@ -151,7 +151,7 @@ function toType( obj ) {


 var
-	version = "3.6.2 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector",
+	version = "3.6.3 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector",

 	// Define a local copy of jQuery
 	jQuery = function( selector, context ) {
@@ -522,14 +522,14 @@ function isArrayLike( obj ) {
 }
 var Sizzle =
 /*!
- * Sizzle CSS Selector Engine v2.3.8
+ * Sizzle CSS Selector Engine v2.3.9
  * https://sizzlejs.com/
  *
  * Copyright JS Foundation and other contributors
  * Released under the MIT license
  * https://js.foundation/
  *
- * Date: 2022-11-16
+ * Date: 2022-12-19
  */
 ( function( window ) {
 var i,
@@ -890,7 +890,7 @@ function Sizzle( selector, context, results, seed ) {
 			if ( support.cssSupportsSelector &&

 				// eslint-disable-next-line no-undef
-				!CSS.supports( "selector(" + newSelector + ")" ) ) {
+				!CSS.supports( "selector(:is(" + newSelector + "))" ) ) {

 				// Support: IE 11+
 				// Throw to get to the same code path as an error directly in qSA.
@@ -1492,9 +1492,8 @@ setDocument = Sizzle.setDocument = function( node ) {

 		// `:has()` uses a forgiving selector list as an argument so our regular
 		// `try-catch` mechanism fails to catch `:has()` with arguments not supported
 		// natively like `:has(:contains("Foo"))`. Where supported & spec-compliant,
-		// we now use `CSS.supports("selector(SELECTOR_TO_BE_TESTED)")` but outside
-		// that, let's mark `:has` as buggy to always use jQuery traversal for
-		// `:has()`.
+		// we now use `CSS.supports("selector(:is(SELECTOR_TO_BE_TESTED))")`, but
+		// outside that we mark `:has` as buggy.
 		rbuggyQSA.push( ":has" );
 	}
@@ -7,31 +7,7 @@
     <link rel="icon" type="image/png" href="{{urlpath}}/vw_static/vaultwarden-favicon.png">
     <title>Page not found!</title>
     <link rel="stylesheet" href="{{urlpath}}/vw_static/bootstrap.css" />
-    <style>
-        body {
-            padding-top: 75px;
-        }
-        .vaultwarden-icon {
-            width: 48px;
-            height: 48px;
-            height: 32px;
-            width: auto;
-            margin: -5px 0 0 0;
-        }
-        .footer {
-            padding: 40px 0 40px 0;
-            border-top: 1px solid #dee2e6;
-        }
-        .container {
-            max-width: 980px;
-        }
-        .content {
-            padding-top: 20px;
-            padding-bottom: 20px;
-            padding-left: 15px;
-            padding-right: 15px;
-        }
-    </style>
+    <link rel="stylesheet" href="{{urlpath}}/vw_static/404.css" />
 </head>

 <body class="bg-light">
@@ -53,7 +29,7 @@
             <h2>Page not found!</h2>
             <p class="lead">Sorry, but the page you were looking for could not be found.</p>
             <p class="display-6">
-                <a href="{{urlpath}}/"><img style="max-width: 500px; width: 100%;" src="{{urlpath}}/vw_static/404.png" alt="Return to the web vault?"></a></p>
+                <a href="{{urlpath}}/"><img class="vw-404" src="{{urlpath}}/vw_static/404.png" alt="Return to the web vault?"></a></p>
             <p>You can <a href="{{urlpath}}/">return to the web-vault</a>, or <a href="https://github.com/dani-garcia/vaultwarden">contact us</a>.</p>
         </main>
Some files were not shown because too many files have changed in this diff.