Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 02:35:58 +03:00)

Compare commits
350 Commits
Commit SHAs (newest first):

3dbfc484a5 4ec2507073 ab65d7989b 8707728cdb 631d022e17
211f4492fa 61f9081827 a8e5384c4a 1c7338c7c4 08f37b9935
4826ddca4c 2b32b6f78c a6cfdddfd8 814ce9a6ac 1bee46f64b
556d945396 664b480c71 84e901b7d2 839b2bc950 6050c8dac5
0a6b797e6e fb6f441a4f 9876aedd67 19e671ff25 60964c07e6
e4894524e4 e7f083dee9 1074315a87 c56bf38079 3c0cac623d
550794b127 e818a0bf37 2aedff50e8 84a23008f4 44e9e1a58e
e4606431d1 5b7d7390b0 a05187c0ff 8e34495e73 4219249e11
bd883de70e 2d66292350 adf67a8ee8 f40f5b8399 2d6ca0ea95
06a10e2c5a 445680fb84 83376544d8 04a17dcdef 0851561392
95cd6deda6 636f16dc66 9e5b049dca 23aa9088f3 4f0ed06b06
349c97efaf 8b05a5d192 83bf77d713 4d5c047ddc 147c9c7b50
6515a2fcad 4a2ed553df ba492c0602 1ec049e2b5 0fb8563b13
f906f6230a 951ba55123 18abf226be 393645617e 5bf243b675
cfba8347a3 55c1b6e8d5 3d7e80a7aa 5866338de4 271e3ae757
48cc31a59f 6a7cee4e7e f850dbb310 07099df41a 0c0a80720e
ae437f70a3 3d11f4cd16 3bd4e42fb0 89e94b1d91 0b28ab3be1
c5bcc340fa bff54fbfdb 867c6ba056 d1ecf03f44 fc43608eec
15dd05c78d aa6f774f65 379f885354 39a5f2dbe8 0daaa9b175
0c085d21ce dcaaa430f0 2cda54ceff 525e6bb65a 62cebebd3d
3646f14042 813e889c97 8bcd0ab0c6 5725d297b4 a428f05e77
467ecfdc99 ed8091a994 56cad93e0f 3cf67e0b8d 5800aceb2d
729b563160 6b5618a5fc 2aa72eb240 c8655c4f89 daaa03d1b3
9e5b94924f f21089900e 0c0e632bc9 a13a5bd1d8 3b34b429f3
97ffd17789 10c5476d31 d3626eba2a de157b2654 337cbfaf22
f88b6d961e 0426051541 4556f668de da8225a3bd f10e6b6ac2
7ec00d3850 8f8d7418ed af6d17b701 61183d001c 024d12db08
dc7951efaf 06e14fea55 0f656b4889 6fa1dc50be 2bb41367bc
20d8886bfa 59ef82b740 fc543154c0 569b464157 adf83c698d
8fcbc58ee2 2dcbb2be59 7026e004e1 a3084feaee e7d36de784
54cc47b14e fac44888cd 9f056523c9 0af1ef387d f95f40be15
5c859e2e6c 03ff5e6ece 52d696aa74 a4e80712dd a947e434f0
2eb4f290a5 8ae799a771 9a5f3a5015 1ca0d6e245 7f69eebeb1
32bd9b83a3 477d60de49 1ba8275dcb a0a4994250 32dfa41970
f92efda0f0 3b0f643e9d 5bcee24f88 9e3d7ea44c 8cc6dac893
b7c4316c77 0c295d5e6e bc49d1f90d 6f6d9dee83 cef5dd4a46
79061c0eb5 6e2c3fc1cc e301fe137f af69c83db2 53fa8da5b1
c58aac585b 8c1117fcbf a6dd4f1206 5af1799991 a20a641de3
8abd38573b 78abdf0e9d dc031d8d86 de6330b09d 68bcc7a4b8
c04a1352cb 5d1c11ceba a2aa7c9bc2 b3a351ccb2 679bc7a59b
a72d0b518f 6741b25907 24b5784f02 eb9b481eba 64edc49392
0d1753ac74 a6558f5548 62dfeb80f2 26cd5d9643 e65fbbfc21
a2162f4d69 c9ed9aa733 9b20decdc1 adaefc8628 c6c45c4c49
95494083f2 686474f815 2c6bd8c9dc 9366e31452 96ff32fb2f
9342fa5744 50fc22966c 4fab4c74ff e38e1a5d5f cc91ac6cc0
2d8c8e18f7 b17e2da2cf d121cce0d2 0eba7a88fa 34ac16e9d7
906d9e2f1a 623d84aeb5 f8122cd2ca 9b7e86efc2 e7ccfbdd0e
acc1474394 c90b3031a6 aaffb2e007 e0e95e95e4 fa70b440d0
42acb2ebb6 174bea8d6e f68a57950b f747bf126b 1ca197fd46
63d05d929b ef5bf5d326 9d6e35d803 0cccdcab83 6607faa390
6fcf18ab51 d122c10573 ae9553ca1c ff919039c9 80eb15d46a
c36b870c54 b7cbca590c 606a1bbfcb 3e5369c8dd dd5e4cec73
a31a040abd f0125b95c1 072f2e24c2 36b5350f9b c7489c9fdf
3181e4e96e 2ee0d53c5f dfa629ecc7 92dc48b882 367e1ce289
7390f34355 c47d9f6593 5399ee8208 117045e6d3 912ad64555
00855ee31d c18a273b4a ca24a4adf1 a263aaa481 0a20ba0020
6541600af6 525979d5d9 7dd1959eba e266b39254 e935989fee
25c401f64d 18b72da657 e8e6c89927 fd5f657334 da9605f2d2
7030de32d5 b67c5b77be d30878c4ea 6be26f0a38 34a6bfaefa
1c8749eb4d 1198c36a2b 41e6c1a383 0042c3e4a7 724190f262
6867d23ca2 de26af0c2d 3f223a7514 23f5a62d61 81e2054f59
f9337effa5 2972904eb8 bdd918b4d4 88085fe17b 2020a302d0
ab2dd0f300 8e6fd4b4a1 988d24927e e945d16fcf f1c0aa4f83
68362d06b3 f65c0e2ac8 0f588ced03 b0f03bb49c 5063661028
7e66ab78ff 665e275dc5 a6da728cca 04e02d7f9f 7c739dd58e
05a552910c c990837066 57aec37507 0c5b4476ad 17141147a8
193c2fa860 6d01aaa80f ad60eaa0f3 d878face07 8bf8388cd6
b4db853bcb 5ee94c0ba9 f108349547 d25e1ab94b 79fee269ee
ffe362f856 04bb15a802 4d9d649db9 2897c24e83 5964dc95f0
613b2519ed 996b60e43d a6d09407b9 f2e9ddef4e ca417d3257
.env.template

@@ -72,6 +72,13 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012
 
+## Enables push notifications (requires key and id from https://bitwarden.com/host)
+# PUSH_ENABLED=true
+# PUSH_INSTALLATION_ID=CHANGEME
+# PUSH_INSTALLATION_KEY=CHANGEME
+## Don't change this unless you know what you're doing.
+# PUSH_RELAY_BASE_URI=https://push.bitwarden.com
+
 ## Controls whether users are allowed to create Bitwarden Sends.
 ## This setting applies globally to all users.
 ## To control this on a per-org basis instead, use the "Disable Send" org policy.
@@ -259,9 +266,15 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
-## Token for the admin interface, preferably use a long random string
-## One option is to use 'openssl rand -base64 48'
+## Token for the admin interface, preferably an Argon2 PHC string
+## Vaultwarden has a built-in generator by calling `vaultwarden hash`
+## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
 ## If not set, the admin panel is disabled
-# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
+## New Argon2 PHC string
+## Note that for some environments, like docker-compose you need to escape all the dollar signs `$` with an extra dollar sign like `$$`
+## Also, use single quotes (') instead of double quotes (") to enclose the string when needed
+# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'
+## Old plain text string (Will generate warnings in favor of Argon2)
+# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
 
 ## Enable this to bypass the admin panel security. This option is only
@@ -298,9 +311,9 @@
 ## This setting applies globally to all users.
 # INCOMPLETE_2FA_TIME_LIMIT=3
 
-## Controls the PBKDF password iterations to apply on the server
-## The change only applies when the password is changed
-# PASSWORD_ITERATIONS=100000
+## Number of server-side passwords hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=350000
 
 ## Controls whether users can set password hints. This setting applies globally to all users.
 # PASSWORD_HINTS_ALLOWED=true
@@ -335,6 +348,9 @@
 ## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
 # ADMIN_RATELIMIT_MAX_BURST=3
 
+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
+
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
@@ -373,7 +389,7 @@
 # ROCKET_WORKERS=10
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
-## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
+## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
 ## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
 # SMTP_HOST=smtp.domain.tld
@@ -385,6 +401,11 @@
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
 
+# Whether to send mail via the `sendmail` command
+# USE_SENDMAIL=false
+# Which sendmail command to use. The one found in the $PATH is used if not specified.
+# SENDMAIL_COMMAND="/path/to/sendmail"
+
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
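The new ADMIN_TOKEN comments above reference a built-in generator. A minimal sketch of producing a value (assumes the vaultwarden binary is on PATH; `vaultwarden hash` is the generator named in the template, `openssl rand` the older plain-string option):

```sh
# Generate an Argon2 PHC string for ADMIN_TOKEN (prompts for a password)
vaultwarden hash

# Older plain-text alternative (will generate warnings in favor of Argon2)
openssl rand -base64 48
```

Per the escaping note above, under docker-compose every `$` in the PHC string must be doubled to `$$`, and the value should be wrapped in single quotes.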
34 .github/workflows/build.yml (vendored)

@@ -9,6 +9,8 @@ on:
       - "Cargo.*"
       - "build.rs"
       - "rust-toolchain"
+      - "rustfmt.toml"
+      - "diesel.toml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -17,6 +19,8 @@ on:
       - "Cargo.*"
       - "build.rs"
       - "rust-toolchain"
+      - "rustfmt.toml"
+      - "diesel.toml"
 
 jobs:
   build:
@@ -26,42 +30,48 @@ jobs:
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
+      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
     strategy:
       fail-fast: false
       matrix:
         channel:
           - "rust-toolchain" # The version defined in rust-toolchain
           - "msrv" # The supported MSRV
-        include:
-          - channel: "msrv"
-            version: "1.60.0"
 
     name: Build and Test ${{ matrix.channel }}
 
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
       # End Checkout the repo
 
       # Install dependencies
       - name: "Install dependencies Ubuntu"
         run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies
 
       # Determine rust-toolchain version
       - name: Init Variables
         id: toolchain
         shell: bash
-        if: ${{ matrix.channel == 'rust-toolchain' }}
         run: |
-          RUST_TOOLCHAIN="$(cat rust-toolchain)"
+          if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
+            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+          elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
+            RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
+          else
+            RUST_TOOLCHAIN="${{ matrix.channel }}"
+          fi
          echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
       # End Determine rust-toolchain version
 
-      # Uses the rust-toolchain file to determine version
+      # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
+        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -69,17 +79,17 @@ jobs:
       # End Uses the rust-toolchain file to determine version
 
 
-      # Install the MSRV channel to be used
+      # Install any other channel to be used, for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
+        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
-          toolchain: ${{ matrix.version }}
+          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used
 
 
       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0
+      - uses: Swatinem/rust-cache@2656b87321093db1cb55fbd73183d195214fdfd1 # v2.5.0
       # End Enable Rust Caching
@@ -184,7 +194,7 @@ jobs:
 
       # Upload artifact to Github Actions
       - name: "Upload artifact"
-        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1
+        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           name: vaultwarden
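To reproduce this job locally, a sketch assuming an Ubuntu/Debian host (the package list comes from the workflow's install step; the feature list matches the clippy hook in the pre-commit config further below):

```sh
# Same build dependencies the workflow installs on the runner
sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config

# Build and test against all database backends
cargo build --features sqlite,mysql,postgresql,enable_mimalloc --release
cargo test --features sqlite,mysql,postgresql,enable_mimalloc
```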
2 .github/workflows/hadolint.yml (vendored)

@@ -13,7 +13,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
       # End Checkout the repo
 
 
155 .github/workflows/release.yml (vendored)

@@ -48,11 +48,23 @@ jobs:
         ports:
           - 5000:5000
     env:
-      # DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speedup building!
-      # DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
-      DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
+      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
+      # build performance and the ability to copy extended file attributes
+      # (e.g., for executable capabilities) across build phases.
+      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
+      # The *_REPO variables need to be configured as repository variables
+      # Append `/settings/variables/actions` to your repo url
+      # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
+      # Check for Docker hub credentials in secrets
+      HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
+      # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
+      # Check for Github credentials in secrets
+      HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
+      # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
+      # Check for Quay.io credentials in secrets
+      HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
     if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
@@ -61,17 +73,10 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
         with:
           fetch-depth: 0
 
-      # Login to Docker Hub
-      - name: Login to Docker Hub
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
       # Determine Docker Tag
       - name: Init Variables
         id: vars
@@ -85,34 +90,146 @@ jobs:
         fi
       # End Determine Docker Tag
 
+      # Login to Docker Hub
+      - name: Login to Docker Hub
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+
+      # Login to GitHub Container Registry
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Login to Quay.io
+      - name: Login to Quay.io
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_USERNAME }}
+          password: ${{ secrets.QUAY_TOKEN }}
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+
-      - name: Build Debian based images
+      # Debian
+
+      # Docker Hub
+      - name: Build Debian based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
         run: |
           ./hooks/build
-        if: ${{ matrix.base_image == 'debian' }}
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
 
-      - name: Push Debian based images
+      - name: Push Debian based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
         run: |
           ./hooks/push
-        if: ${{ matrix.base_image == 'debian' }}
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
 
+      # GitHub Container Registry
+      - name: Build Debian based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Push Debian based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Quay.io
+      - name: Build Debian based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Push Debian based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
+
-      - name: Build Alpine based images
+      # Alpine
+
+      # Docker Hub
+      - name: Build Alpine based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
         run: |
           ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' }}
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
 
-      - name: Push Alpine based images
+      - name: Push Alpine based images (docker.io)
         shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
         run: |
           ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' }}
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
 
+      # GitHub Container Registry
+      - name: Build Alpine based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Push Alpine based images (ghcr.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
+
+      # Quay.io
+      - name: Build Alpine based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/build
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Push Alpine based images (quay.io)
+        shell: bash
+        env:
+          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
+          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
+        run: |
+          ./hooks/push
+        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
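The build/push steps above only vary `DOCKER_REPO` and `DOCKER_TAG` before delegating to the hook scripts, so a publish can be exercised by hand along these lines (a sketch; the repo value is a placeholder in the `index.docker.io/<user>/<repo>` format noted above):

```sh
# Mirror what the Docker Hub build/push steps do
export DOCKER_REPO="index.docker.io/<user>/<repo>"
export DOCKER_TAG="testing"
./hooks/build
./hooks/push

# The Alpine variants only append "-alpine" to the tag
DOCKER_TAG="testing-alpine" ./hooks/build
```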
.hadolint.yaml

@@ -3,5 +3,9 @@ ignored:
   - DL3008
   # disable explicit version for apk install
   - DL3018
+  # disable check for consecutive `RUN` instructions
+  - DL3059
 trustedRegistries:
   - docker.io
+  - ghcr.io
+  - quay.io
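hadolint reads this config automatically from the working directory, so linting a Dockerfile needs no extra flags (a sketch; the Dockerfile path is illustrative):

```sh
# DL3008/DL3018/DL3059 are skipped and the three registries above are trusted
hadolint docker/amd64/Dockerfile
```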
.pre-commit-config.yaml

@@ -1,16 +1,20 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.3.0
+    rev: v4.4.0
     hooks:
       - id: check-yaml
       - id: check-json
       - id: check-toml
+      - id: mixed-line-ending
+        args: ["--fix=no"]
       - id: end-of-file-fixer
         exclude: "(.*js$|.*css$)"
       - id: check-case-conflict
       - id: check-merge-conflict
       - id: detect-private-key
+      - id: check-symlinks
+      - id: forbid-submodules
   - repo: local
     hooks:
       - id: fmt
@@ -36,5 +40,5 @@ repos:
       language: system
       args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
       types_or: [rust, file]
-      files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
+      files: (Cargo.toml|Cargo.lock|rust-toolchain|clippy.toml|.*\.rs$)
       pass_filenames: false
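For local use, the standard pre-commit workflow applies (commands from the upstream pre-commit tool):

```sh
# Install the git hook once; it then runs on every commit
pre-commit install

# Or run all hooks, including the local fmt and clippy hooks, across the repo
pre-commit run --all-files
```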
1886 Cargo.lock (generated)

File diff suppressed because it is too large.
126 Cargo.toml

@@ -3,12 +3,12 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.60.0"
+rust-version = "1.68.2"
 resolver = "2"
 
 repository = "https://github.com/dani-garcia/vaultwarden"
 readme = "README.md"
-license = "GPL-3.0-only"
+license = "AGPL-3.0-only"
 publish = false
 build = "build.rs"
@@ -36,70 +36,72 @@ unstable = []
 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.0.1" # Needs to be v4 until fern is updated
+syslog = "6.1.0"
 
 [dependencies]
 # Logging
-log = "0.4.17"
-fern = { version = "0.6.1", features = ["syslog-6"] }
+log = "0.4.19"
+fern = { version = "0.6.2", features = ["syslog-6"] }
 tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
 
 backtrace = "0.3.67" # Logging panics to logfile instead stderr only
 
 # A `dotenv` implementation for Rust
-dotenvy = { version = "0.15.6", default-features = false }
+dotenvy = { version = "0.15.7", default-features = false }
 
 # Lazy initialization
-once_cell = "1.16.0"
+once_cell = "1.18.0"
 
 # Numerical libraries
 num-traits = "0.2.15"
-num-derive = "0.3.3"
+num-derive = "0.4.0"
 
 # Web framework
-rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }
+rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
+# rocket_ws = { version ="0.1.0-rc.3" }
+rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch
 
 # WebSockets libraries
-tokio-tungstenite = "0.18.0"
+tokio-tungstenite = "0.19.0"
 rmpv = "1.0.0" # MessagePack library
 
 # Concurrent HashMap used for WebSocket messaging and favicons
 dashmap = "5.4.0"
 
 # Async futures
-futures = "0.3.25"
-tokio = { version = "1.23.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+futures = "0.3.28"
+tokio = { version = "1.29.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
 
 # A generic serialization/deserialization framework
-serde = { version = "1.0.150", features = ["derive"] }
-serde_json = "1.0.89"
+serde = { version = "1.0.166", features = ["derive"] }
+serde_json = "1.0.99"
 
 # A safe, extensible ORM and Query builder
-diesel = { version = "2.0.2", features = ["chrono", "r2d2"] }
-diesel_migrations = "2.0.0"
-diesel_logger = { version = "0.2.0", optional = true }
+diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
+diesel_migrations = "2.1.0"
+diesel_logger = { version = "0.3.0", optional = true }
 
-# Bundled SQLite
-libsqlite3-sys = { version = "0.25.2", features = ["bundled"], optional = true }
+# Bundled/Static SQLite
+libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
 
 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
 ring = "0.16.20"
 
 # UUID generation
-uuid = { version = "1.2.2", features = ["v4"] }
+uuid = { version = "1.4.0", features = ["v4"] }
 
 # Date and time libraries
-chrono = { version = "0.4.23", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.1"
-time = "0.3.17"
+chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.8.3"
+time = "0.3.22"
 
 # Job scheduler
-job_scheduler_ng = "2.0.3"
+job_scheduler_ng = "2.0.4"
 
 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.3.3"
+data-encoding = "2.4.0"
 
 # JWT library
-jsonwebtoken = "8.2.0"
+jsonwebtoken = "8.3.0"
 
 # TOTP library
 totp-lite = "2.0.0"
@@ -110,56 +112,72 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 # WebAuthn libraries
 webauthn-rs = "0.3.2"
 
-# Handling of URL's for WebAuthn
-url = "2.3.1"
+# Handling of URL's for WebAuthn and favicons
+url = "2.4.0"
 
 # Email libraries
-lettre = { version = "0.10.1", features = ["smtp-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
-percent-encoding = "2.2.0" # URL encoding library used for URL's in the emails
+lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
+email_address = "0.2.4"
 
-# Template library
-handlebars = { version = "4.3.5", features = ["dir_source"] }
+# HTML Template library
+handlebars = { version = "4.3.7", features = ["dir_source"] }
 
-# HTTP client
-reqwest = { version = "0.11.13", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
+# HTTP client (Used for favicons, version check, DUO and HIBP API)
+reqwest = { version = "0.11.18", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
 
-# For favicon extraction from main website
-html5gum = "0.5.2"
-regex = { version = "1.7.0", features = ["std", "perf", "unicode-perl"], default-features = false }
-data-url = "0.2.0"
-bytes = "1.3.0"
-cached = "0.40.0"
+# Favicon extraction libraries
+html5gum = "0.5.3"
+regex = { version = "1.8.4", features = ["std", "perf", "unicode-perl"], default-features = false }
+data-url = "0.3.0"
+bytes = "1.4.0"
+
+# Cache function results (Used for version check and favicon fetching)
+cached = "0.44.0"
 
 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.16.1"
-cookie_store = "0.19.0"
+cookie = "0.16.2"
+cookie_store = "0.19.1"
 
-# Used by U2F, JWT and Postgres
-openssl = "0.10.44"
+# Used by U2F, JWT and PostgreSQL
+openssl = "0.10.55"
 
 # CLI argument parsing
 pico-args = "0.5.0"
 
 # Macro ident concatenation
-paste = "1.0.10"
+paste = "1.0.13"
 governor = "0.5.1"
 
 # Check client versions for specific features.
-semver = "1.0.14"
+semver = "1.0.17"
 
 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.32", features = ["secure"], default-features = false, optional = true }
+mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
+which = "4.4.0"
+
+# Argon2 library with support for the PHC format
+argon2 = "0.5.0"
+
+# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
+rpassword = "7.2.0"
 
 [patch.crates-io]
 # Using a patched version of multer-rs (Used by Rocket) to fix attachment/send file uploads
 # Issue: https://github.com/dani-garcia/vaultwarden/issues/2644
 # Patch: https://github.com/BlackDex/multer-rs/commit/477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a
 multer = { git = "https://github.com/BlackDex/multer-rs", rev = "477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a" }
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
+# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
 
 # Strip debuginfo from the release builds
 # Also enable thin LTO for some optimizations
 [profile.release]
 strip = "debuginfo"
 lto = "thin"
 
+# Always build argon2 using opt-level 3
+# This is a huge speed improvement during testing
+[profile.dev.package.argon2]
+opt-level = 3
+
 # A little bit of a speedup
 [profile.dev]
 split-debuginfo = "unpacked"
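The MSRV bump to 1.68.2 is what the `msrv` CI channel reads back out of this manifest; the same extraction works locally (using the grep expression from the build workflow above):

```sh
# Extract rust-version from Cargo.toml exactly as the CI job does
RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
echo "${RUST_TOOLCHAIN}"  # prints: 1.68.2
```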
143 LICENSE.txt

@@ -1,5 +1,5 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
 
  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
  Everyone is permitted to copy and distribute verbatim copies
@@ -7,17 +7,15 @@
 
                             Preamble
 
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
 
   The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
+our General Public Licenses are intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
+software for all its users.
 
   When we speak of free software, we are referring to freedom, not
 price.  Our General Public Licenses are designed to make sure that you
@@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.
 
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
 
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
 
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
 
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
 
   The precise terms and conditions for copying, distribution and
 modification follow.
@@ -72,7 +60,7 @@ modification follow.
 
   0. Definitions.
 
-  "This License" refers to version 3 of the GNU General Public License.
+  "This License" refers to version 3 of the GNU Affero General Public License.
 
   "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.
@@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.
 
-  13. Use with the GNU Affero General Public License.
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
 
   Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
+under version 3 of the GNU General Public License into a single
 combined work, and to convey the resulting work.  The terms of this
 License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
 
   14. Revised Versions of this License.
 
   The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.
 
   Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
+Program specifies that a certain numbered version of the GNU Affero General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
+GNU Affero General Public License, you may choose any version ever published
 by the Free Software Foundation.
 
   If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
+versions of the GNU Affero General Public License can be used, that proxy's
 public statement of acceptance of a version permanently authorizes you
 to choose that version for the Program.
@@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found.
     Copyright (C) <year>  <name of author>
 
     This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
     (at your option) any later version.
 
     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
+    GNU Affero General Public License for more details.
 
-    You should have received a copy of the GNU General Public License
+    You should have received a copy of the GNU Affero General Public License
     along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
 Also add information on how to contact you by electronic and paper mail.
 
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
 
   You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
+For more information on this, and how to apply and follow the GNU AGPL, see
 <https://www.gnu.org/licenses/>.
-
-The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
17 README.md

@@ -3,11 +3,13 @@
 📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
 
 ---
 
 [Build](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml)
+[ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
 [Docker Pulls](https://hub.docker.com/r/vaultwarden/server)
+[Quay.io](https://quay.io/repository/vaultwarden/server)
 [Dependency Status](https://deps.rs/repo/github/dani-garcia/vaultwarden)
 [GitHub Release](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[GPL-3.0 Licensed](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
+[AGPL-3.0 Licensed](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
 [Matrix Chat](https://matrix.to/#/#vaultwarden:matrix.org)
 
 Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
@@ -23,23 +25,24 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
 Basically full implementation of Bitwarden API is provided including:
 
  * Organizations support
- * Attachments
+ * Attachments and Send
 * Vault API support
 * Serving the static files for Vault interface
 * Website icons API
 * Authenticator and U2F support
 * YubiKey and Duo support
+ * Emergency Access
 
 ## Installation
 Pull the docker image and mount a volume from the host for persistent storage:
 
 ```sh
 docker pull vaultwarden/server:latest
-docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
+docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
 ```
 This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
 
-**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
 
 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
@@ -49,9 +52,9 @@ If you have an available domain name, you can get HTTPS certificates with [Let's
 See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.
 
 ## Get in touch
-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).
+To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/).
 
-If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!
+If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though!
 
 If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
2 build.rs

@@ -72,7 +72,7 @@ fn version_from_git_info() -> Result<String, std::io::Error> {
     // Combined version
     if let Some(exact) = exact_tag {
         Ok(exact)
-    } else if &branch != "main" && &branch != "master" {
+    } else if &branch != "main" && &branch != "master" && &branch != "HEAD" {
         Ok(format!("{last_tag}-{rev_short} ({branch})"))
     } else {
         Ok(format!("{last_tag}-{rev_short}"))
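The extra `HEAD` check covers detached-HEAD checkouts (e.g. in CI), where git reports the branch name as `HEAD`. A sketch of the git plumbing such a version string is typically assembled from (the exact flags used in build.rs may differ):

```sh
git describe --abbrev=0 --tags    # last_tag
git rev-parse --short HEAD        # rev_short
git rev-parse --abbrev-ref HEAD   # branch; prints "HEAD" when detached
```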
@@ -2,40 +2,42 @@
|
||||
|
||||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||
|
||||
{% set build_stage_base_image = "rust:1.66-bullseye" %}
|
||||
{% set rust_version = "1.70.0" %}
|
||||
{% set debian_version = "bullseye" %}
|
||||
{% set alpine_version = "3.17" %}
|
||||
{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "amd64" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.66.0" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.17" %}
|
||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s" % rust_version %}
|
||||
{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
|
||||
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.66.0" %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.17" %}
|
||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s" % rust_version %}
|
||||
{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
||||
{% elif "armv6" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.66.0" %}
|
||||
{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.17" %}
|
||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s" % rust_version %}
|
||||
{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
|
||||
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
|
||||
{% elif "arm64" in target_file %}
|
||||
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.66.0" %}
|
||||
{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.17" %}
|
||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s" % rust_version %}
|
||||
{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
|
||||
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
|
||||
{% endif %}
|
||||
{% elif "amd64" in target_file %}
|
||||
{% set runtime_stage_base_image = "debian:bullseye-slim" %}
|
||||
{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
|
||||
{% elif "arm64" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
|
||||
{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
|
||||
{% set package_arch_name = "arm64" %}
|
||||
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
|
||||
{% set package_cross_compiler = "aarch64-linux-gnu" %}
|
||||
{% elif "armv6" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
|
||||
{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
|
||||
{% set package_arch_name = "armel" %}
|
||||
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabi" %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
|
||||
{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
|
||||
{% set package_arch_name = "armhf" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
|
||||
@@ -50,7 +52,7 @@
|
||||
{% else %}
|
||||
{% set package_arch_target_param = "" %}
|
||||
{% endif %}
|
||||
{% if "buildx" in target_file %}
|
||||
{% if "buildkit" in target_file %}
|
||||
{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
|
||||
{% else %}
|
||||
{% set mount_rust_cache = "" %}
|
||||
@@ -59,8 +61,8 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_version = "v2022.12.0" %}
{% set vault_image_digest = "sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e" %}
{% set vault_version = "v2023.5.0" %}
{% set vault_image_digest = "sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085" %}
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
@@ -70,21 +72,19 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:{{ vault_version }}
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:{{ vault_version }}
# [vaultwarden/web-vault@{{ vault_image_digest }}]
# $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version }}
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version }}
# [docker.io/vaultwarden/web-vault@{{ vault_image_digest }}]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
# [vaultwarden/web-vault:{{ vault_version }}]
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
#
FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault

########################## BUILD IMAGE ##########################
FROM {{ build_stage_base_image }} as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
@@ -93,7 +93,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -104,21 +103,19 @@ RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
{% endif %}
{% elif "arm" in target_file %}
#
# Install required build libs for {{ package_arch_name }} architecture.
# hadolint ignore=DL3059
# Install build dependencies for the {{ package_arch_name }} architecture
RUN dpkg --add-architecture {{ package_arch_name }} \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev{{ package_arch_prefix }} \
gcc-{{ package_cross_compiler }} \
libc6-dev{{ package_arch_prefix }} \
libpq5{{ package_arch_prefix }} \
libpq-dev{{ package_arch_prefix }} \
libmariadb3{{ package_arch_prefix }} \
libmariadb-dev{{ package_arch_prefix }} \
libmariadb-dev-compat{{ package_arch_prefix }} \
gcc-{{ package_cross_compiler }} \
libmariadb3{{ package_arch_prefix }} \
libpq-dev{{ package_arch_prefix }} \
libpq5{{ package_arch_prefix }} \
libssl-dev{{ package_arch_prefix }} \
#
# Make sure cargo has the right target config
&& echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
@@ -130,16 +127,13 @@ ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"

{% elif "amd64" in target_file %}
# Install DB packages
# Install build dependencies
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libmariadb-dev{{ package_arch_prefix }} \
libpq-dev{{ package_arch_prefix }} \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
libmariadb-dev \
libpq-dev
{% endif %}

# Creates a dummy project used to grab dependencies
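
The dummy project is the usual cargo layer-caching trick: dependencies are compiled once against a stub main.rs so Docker can cache that layer, and the final build only recompiles the crate itself. Roughly the same flow outside Docker, as a sketch (the manifest copy happens in a hunk elided from this diff):

$ cargo new --bin /app                        # stub project with a dummy main.rs
$ cp Cargo.toml Cargo.lock /app/              # real manifests
$ (cd /app && cargo build --release)          # compiles and caches dependencies only
$ rm -rf /app/src && cp -r src /app/          # swap in the real sources
$ (cd /app && touch src/main.rs && cargo build --release)   # touch defeats the mtime cache; rebuilds just the crate
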
@@ -178,7 +172,6 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}

######################## RUNTIME IMAGE ########################
@@ -195,7 +188,6 @@ ENV ROCKET_PROFILE="release" \

{% if "amd64" not in target_file %}
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
{% endif %}

@@ -203,18 +195,18 @@ RUN [ "cross-build-start" ]
RUN mkdir /data \
{% if "alpine" in runtime_stage_base_image %}
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata
{% else %}
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
{% endif %}
@@ -222,13 +214,11 @@ RUN mkdir /data \
{% if "armv6" in target_file and "alpine" not in target_file %}
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

{% endif -%}

{% if "amd64" not in target_file %}
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
{% endif %}

@@ -8,8 +8,8 @@ all: $(OBJECTS)
%/Dockerfile.alpine: Dockerfile.j2 render_template
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.buildx: Dockerfile.j2 render_template
%/Dockerfile.buildkit: Dockerfile.j2 render_template
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.buildx.alpine: Dockerfile.j2 render_template
%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
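
Each pattern rule passes the output path itself as `target_file`, which is exactly what the template's `"buildkit" in target_file` and `"alpine" in target_file` checks key off. Rendering a single file by hand is a mechanical substitution into the rule above (`amd64/` standing in for any of the arch directories):

$ ./render_template Dockerfile.j2 '{"target_file":"amd64/Dockerfile.buildkit"}' > amd64/Dockerfile.buildkit
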
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,19 +36,16 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

# Install DB packages
# Install build dependencies
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libmariadb-dev \
libpq-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -81,13 +75,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:bullseye-slim
FROM docker.io/library/debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -98,11 +91,11 @@ ENV ROCKET_PROFILE="release" \
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
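
With the registry-qualified image names in place the generated file builds exactly as before; a quick smoke test might look like this (paths, tag, and port mapping are illustrative, and the Rocket port is assumed to default to 80 as in the arm variants below):

$ docker build -t vaultwarden-local -f amd64/Dockerfile .
$ docker run --rm -p 8080:80 -v "$(pwd)/vw-data:/data" vaultwarden-local
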
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build

FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.17
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -93,10 +88,10 @@ ENV ROCKET_PROFILE="release" \
# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata

VOLUME /data
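
The alpine variants link against musl, so the binary carries no glibc dependency and can run on the minimal alpine runtime stage. A hypothetical sanity check inside the build stage (the binary path is inferred from the cargo target above):

$ file /app/target/x86_64-unknown-linux-musl/release/vaultwarden   # expect a statically linked ELF
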
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,19 +36,16 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

# Install DB packages
# Install build dependencies
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libmariadb-dev \
libpq-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -81,13 +75,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:bullseye-slim
FROM docker.io/library/debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -98,11 +91,11 @@ ENV ROCKET_PROFILE="release" \
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build

FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.17
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -93,10 +88,10 @@ ENV ROCKET_PROFILE="release" \
# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

#
# Install required build libs for arm64 architecture.
# hadolint ignore=DL3059
# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
gcc-aarch64-linux-gnu \
libc6-dev:arm64 \
libpq5:arm64 \
libpq-dev:arm64 \
libmariadb3:arm64 \
libmariadb-dev:arm64 \
libmariadb-dev-compat:arm64 \
gcc-aarch64-linux-gnu \
libmariadb3:arm64 \
libpq-dev:arm64 \
libpq5:arm64 \
libssl-dev:arm64 \
#
# Make sure cargo has the right target config
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,34 +94,31 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:bullseye
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
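
`cross-build-start` / `cross-build-end` are hooks provided by the balenalib base images that wrap the intervening RUN steps in a QEMU shim, so this arm64 runtime stage can be assembled on an amd64 build host. If the host has no binfmt handlers registered yet, one common way to set them up (not something these Dockerfiles do for you) is:

$ docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
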
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build

FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -89,18 +84,16 @@ ENV ROCKET_PROFILE="release" \
SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

#
# Install required build libs for arm64 architecture.
# hadolint ignore=DL3059
# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
gcc-aarch64-linux-gnu \
libc6-dev:arm64 \
libpq5:arm64 \
libpq-dev:arm64 \
libmariadb3:arm64 \
libmariadb-dev:arm64 \
libmariadb-dev-compat:arm64 \
gcc-aarch64-linux-gnu \
libmariadb3:arm64 \
libpq-dev:arm64 \
libpq5:arm64 \
libssl-dev:arm64 \
#
# Make sure cargo has the right target config
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,34 +94,31 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:bullseye
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build

FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -89,18 +84,16 @@ ENV ROCKET_PROFILE="release" \
SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
gcc-arm-linux-gnueabi \
libc6-dev:armel \
libpq5:armel \
libpq-dev:armel \
libmariadb3:armel \
libmariadb-dev:armel \
libmariadb-dev-compat:armel \
gcc-arm-linux-gnueabi \
libmariadb3:armel \
libpq-dev:armel \
libpq5:armel \
libssl-dev:armel \
#
# Make sure cargo has the right target config
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,39 +94,35 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build

FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -77,13 +73,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -91,18 +86,16 @@ ENV ROCKET_PROFILE="release" \
SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
gcc-arm-linux-gnueabi \
libc6-dev:armel \
libpq5:armel \
libpq-dev:armel \
libmariadb3:armel \
libmariadb-dev:armel \
libmariadb-dev-compat:armel \
gcc-arm-linux-gnueabi \
libmariadb3:armel \
libpq-dev:armel \
libpq5:armel \
libssl-dev:armel \
#
# Make sure cargo has the right target config
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,39 +94,35 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:bullseye
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build

FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
@@ -77,13 +73,12 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@@ -91,18 +86,16 @@ ENV ROCKET_PROFILE="release" \
SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
ca-certificates \
curl \
ca-certificates
openssl \
tzdata

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
@@ -2,7 +2,6 @@

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
# [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.66-bullseye as build

FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

#
# Install required build libs for armhf architecture.
# hadolint ignore=DL3059
# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
gcc-arm-linux-gnueabihf \
libc6-dev:armhf \
libpq5:armhf \
libpq-dev:armhf \
libmariadb3:armhf \
libmariadb-dev:armhf \
libmariadb-dev-compat:armhf \
gcc-arm-linux-gnueabihf \
libmariadb3:armhf \
libpq-dev:armhf \
libpq5:armhf \
libssl-dev:armhf \
#
# Make sure cargo has the right target config
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
@@ -101,34 +94,31 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:bullseye
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadb-dev-compat \
libpq5 \
openssl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
docker/armv7/Dockerfile.alpine
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # 	https://docs.docker.com/develop/develop-images/multistage-build/
 # 	https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+#     [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE  ##########################
-FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build
+FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
 # hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

 ######################## RUNTIME IMAGE  ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-alpine:3.17
+FROM docker.io/balenalib/armv7hf-alpine:3.17

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -89,18 +84,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs

 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata

 # hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data
docker/armv7/Dockerfile.buildkit
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # 	https://docs.docker.com/develop/develop-images/multistage-build/
 # 	https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+#     [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE  ##########################
-FROM rust:1.66-bullseye as build
+FROM docker.io/library/rust:1.70.0-bullseye as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,26 +36,23 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal

-#
-# Install required build libs for armhf architecture.
-# hadolint ignore=DL3059
+# Install build dependencies for the armhf architecture
 RUN dpkg --add-architecture armhf \
     && apt-get update \
     && apt-get install -y \
         --no-install-recommends \
-        libssl-dev:armhf \
+        gcc-arm-linux-gnueabihf \
         libc6-dev:armhf \
-        libpq5:armhf \
-        libpq-dev:armhf \
-        libmariadb3:armhf \
         libmariadb-dev:armhf \
         libmariadb-dev-compat:armhf \
-        gcc-arm-linux-gnueabihf \
+        libmariadb3:armhf \
+        libpq-dev:armhf \
+        libpq5:armhf \
+        libssl-dev:armhf \
     #
     # Make sure cargo has the right target config
     && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
@@ -71,7 +65,6 @@ ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
     OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
     OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -101,34 +94,31 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
 # hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

 ######################## RUNTIME IMAGE  ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-debian:bullseye
+FROM docker.io/balenalib/armv7hf-debian:bullseye

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80

 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apt-get update && apt-get install -y \
         --no-install-recommends \
-        openssl \
         ca-certificates \
         curl \
         libmariadb-dev-compat \
         libpq5 \
+        openssl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

 # hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data
docker/armv7/Dockerfile.buildkit.alpine
@@ -2,7 +2,6 @@

 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

 # Using multistage build:
 # 	https://docs.docker.com/develop/develop-images/multistage-build/
 # 	https://whitfin.io/speeding-up-rust-docker-builds/
@@ -16,20 +15,18 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull vaultwarden/web-vault:v2022.12.0
-#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
-#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
+#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
-#     [vaultwarden/web-vault:v2022.12.0]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
+#     [docker.io/vaultwarden/web-vault:v2023.5.0]
 #
-FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault
+FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

 ########################## BUILD IMAGE  ##########################
-FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build
+FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -39,7 +36,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
     CARGO_HOME="/root/.cargo" \
     USER="root"

 # Create CARGO_HOME folder and don't download rust docs
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
@@ -75,13 +71,12 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
 # hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

 ######################## RUNTIME IMAGE  ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/armv7hf-alpine:3.17
+FROM docker.io/balenalib/armv7hf-alpine:3.17

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -89,18 +84,16 @@ ENV ROCKET_PROFILE="release" \
     SSL_CERT_DIR=/etc/ssl/certs

 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]

 # Create data folder and Install needed libraries
 RUN mkdir /data \
     && apk add --no-cache \
-        openssl \
-        tzdata \
+        ca-certificates \
         curl \
-        ca-certificates
+        openssl \
+        tzdata

 # hadolint ignore=DL3059
 RUN [ "cross-build-end" ]

 VOLUME /data
hooks/arches.sh
@@ -1,3 +1,5 @@
+#!/usr/bin/env bash
+
 # The default Debian-based images support these arches for all database backends.
 arches=(
     amd64
@@ -5,7 +7,9 @@ arches=(
     armv7
     arm64
 )
+export arches

 if [[ "${DOCKER_TAG}" == *alpine ]]; then
     distro_suffix=.alpine
 fi
+export distro_suffix
hooks/build (13 changed lines)
@@ -1,7 +1,8 @@
-#!/bin/bash
+#!/usr/bin/env bash

 echo ">>> Building images..."

+# shellcheck source=arches.sh
 source ./hooks/arches.sh

 if [[ -z "${SOURCE_COMMIT}" ]]; then
@@ -23,10 +24,10 @@ LABELS=(
     # https://github.com/opencontainers/image-spec/blob/master/annotations.md
     org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
     org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
-    org.opencontainers.image.licenses="GPL-3.0-only"
+    org.opencontainers.image.licenses="AGPL-3.0-only"
     org.opencontainers.image.revision="${SOURCE_COMMIT}"
     org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
-    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
+    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
     org.opencontainers.image.version="${SOURCE_VERSION}"
 )
 LABEL_ARGS=()
@@ -34,9 +35,9 @@ for label in "${LABELS[@]}"; do
     LABEL_ARGS+=(--label "${label}")
 done

-# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildx as template
+# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
 if [[ -n "${DOCKER_BUILDKIT}" ]]; then
-    buildx_suffix=.buildx
+    buildkit_suffix=.buildkit
 fi

 set -ex
@@ -45,6 +46,6 @@ for arch in "${arches[@]}"; do
     docker build \
         "${LABEL_ARGS[@]}" \
         -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-        -f docker/${arch}/Dockerfile${buildx_suffix}${distro_suffix} \
+        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
         .
 done
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 set -ex

hooks/push (54 changed lines)
@@ -1,5 +1,6 @@
-#!/bin/bash
+#!/usr/bin/env bash

+# shellcheck source=arches.sh
 source ./hooks/arches.sh

 export DOCKER_CLI_EXPERIMENTAL=enabled
@@ -41,7 +42,7 @@ LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

 echo ">>> Pushing images to local registry..."

-for arch in ${arches[@]}; do
+for arch in "${arches[@]}"; do
     docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
     local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
     docker tag "${docker_image}" "${local_image}"
@@ -71,9 +72,9 @@ tags=("${DOCKER_REPO}:${DOCKER_TAG}")
 # to make it easier for users to track the latest release.
 if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
     if [[ "${DOCKER_TAG}" == *alpine ]]; then
-        tags+=(${DOCKER_REPO}:alpine)
+        tags+=("${DOCKER_REPO}:alpine")
     else
-        tags+=(${DOCKER_REPO}:latest)
+        tags+=("${DOCKER_REPO}:latest")
     fi
 fi

@@ -91,10 +92,10 @@ declare -A arch_to_platform=(
     [arm64]="linux/arm64"
 )
 platforms=()
-for arch in ${arches[@]}; do
+for arch in "${arches[@]}"; do
     platforms+=("${arch_to_platform[$arch]}")
 done
-platforms="$(join "," "${platforms[@]}")"
+platform="$(join "," "${platforms[@]}")"

 # Run the build, pushing the resulting images and multi-arch manifest list to
 # Docker Hub. The Dockerfile is read from stdin to avoid sending any build
@@ -104,46 +105,7 @@ docker buildx build \
     --network host \
     --build-arg LOCAL_REPO="${LOCAL_REPO}" \
     --build-arg DOCKER_TAG="${DOCKER_TAG}" \
-    --platform "${platforms}" \
+    --platform "${platform}" \
     "${tag_args[@]}" \
     --push \
     - < ./docker/Dockerfile.buildx
-
-# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
-# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
-# (https://github.com/moby/moby/issues/41017).
-#
-# Note that we use `arm32v6` instead of `armv6` to be consistent with the
-# existing vaultwarden tags, which adhere to the naming conventions of the
-# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
-# Unfortunately, these per-arch repo names aren't always consistent with the
-# corresponding platform (OS/arch/variant) IDs, particularly in the case of
-# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
-#
-# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
-# so this step can be removed once fixed versions are in wider distribution.
-#
-# Tags:
-#
-#   testing => testing-arm32v6
-#   testing-alpine => <ignored>
-#   x.y.z => x.y.z-arm32v6, latest-arm32v6
-#   x.y.z-alpine => <ignored>
-#
-if [[ "${DOCKER_TAG}" != *alpine ]]; then
-    image="${DOCKER_REPO}":"${DOCKER_TAG}"
-
-    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
-    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
-    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"
-
-    # Pull the armv6 image by digest, retag it, and repush it.
-    docker pull "${DOCKER_REPO}"@"${digest}"
-    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
-    docker push "${image}"-arm32v6
-
-    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
-        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
-        docker push "${DOCKER_REPO}:latest"-arm32v6
-    fi
-fi
@@ -0,0 +1,2 @@
+ALTER TABLE users_organizations
+ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
+ALTER TABLE users
+ADD COLUMN avatar_color VARCHAR(7);

migrations/mysql/2023-01-31-222222_add_argon2/up.sql (new file, 7 lines)
@@ -0,0 +1,7 @@
+ALTER TABLE users
+ADD COLUMN
+client_kdf_memory INTEGER DEFAULT NULL;
+
+ALTER TABLE users
+ADD COLUMN
+client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1 @@
+ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,10 @@
+CREATE TABLE organization_api_key (
+    uuid          CHAR(36) NOT NULL,
+    org_uuid      CHAR(36) NOT NULL REFERENCES organizations(uuid),
+    atype         INTEGER NOT NULL,
+    api_key       VARCHAR(255) NOT NULL,
+    revision_date DATETIME NOT NULL,
+    PRIMARY KEY(uuid, org_uuid)
+);
+
+ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1 @@
+ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
+ALTER TABLE users_organizations
+ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
+ALTER TABLE users
+ADD COLUMN avatar_color TEXT;
@@ -0,0 +1,7 @@
+ALTER TABLE users
+ADD COLUMN
+client_kdf_memory INTEGER DEFAULT NULL;
+
+ALTER TABLE users
+ADD COLUMN
+client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1 @@
+ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,10 @@
+CREATE TABLE organization_api_key (
+    uuid          CHAR(36) NOT NULL,
+    org_uuid      CHAR(36) NOT NULL REFERENCES organizations(uuid),
+    atype         INTEGER NOT NULL,
+    api_key       VARCHAR(255),
+    revision_date TIMESTAMP NOT NULL,
+    PRIMARY KEY(uuid, org_uuid)
+);
+
+ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1 @@
+ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
+ALTER TABLE users_organizations
+ADD COLUMN reset_password_key TEXT;
@@ -0,0 +1,2 @@
+ALTER TABLE users
+ADD COLUMN avatar_color TEXT;

migrations/sqlite/2023-01-31-222222_add_argon2/up.sql (new file, 7 lines)
@@ -0,0 +1,7 @@
+ALTER TABLE users
+ADD COLUMN
+client_kdf_memory INTEGER DEFAULT NULL;
+
+ALTER TABLE users
+ADD COLUMN
+client_kdf_parallelism INTEGER DEFAULT NULL;
@@ -0,0 +1 @@
+ALTER TABLE devices ADD COLUMN push_uuid TEXT;
@@ -0,0 +1,11 @@
+CREATE TABLE organization_api_key (
+    uuid            TEXT NOT NULL,
+    org_uuid        TEXT NOT NULL,
+    atype           INTEGER NOT NULL,
+    api_key         TEXT NOT NULL,
+    revision_date   DATETIME NOT NULL,
+    PRIMARY KEY(uuid, org_uuid),
+    FOREIGN KEY(org_uuid) REFERENCES organizations(uuid)
+);
+
+ALTER TABLE users ADD COLUMN external_id TEXT;
@@ -0,0 +1 @@
+ALTER TABLE collections ADD COLUMN external_id TEXT;
@@ -1 +1 @@
-1.66.0
+1.70.0
@@ -1,7 +1,4 @@
-# version = "Two"
 edition = "2021"
 max_width = 120
 newline_style = "Unix"
 use_small_heuristics = "Off"
-# struct_lit_single_line = false
-# overflow_delimited_expr = true
src/api/admin.rs (262 changed lines)
@@ -13,7 +13,7 @@ use rocket::{
 };

 use crate::{
-    api::{core::log_event, ApiResult, EmptyResult, JsonResult, NumberOrString},
+    api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
@@ -33,8 +33,10 @@ pub fn routes() -> Vec<Route> {
     routes![
         get_users_json,
         get_user_json,
+        get_user_by_mail_json,
         post_admin_login,
         admin_page,
+        admin_page_login,
         invite_user,
         logout,
         delete_user,
@@ -52,7 +54,8 @@ pub fn routes() -> Vec<Route> {
         organizations_overview,
         delete_organization,
         diagnostics,
-        get_diagnostics_config
+        get_diagnostics_config,
+        resend_user_invite,
     ]
 }

@@ -144,7 +147,6 @@ fn render_admin_login(msg: Option<&str>, redirect: Option<String>) -> ApiResult<
     let msg = msg.map(|msg| format!("Error: {msg}"));
     let json = json!({
         "page_content": "admin/login",
-        "version": VERSION,
         "error": msg,
         "redirect": redirect,
         "urlpath": CONFIG.domain_path()
@@ -184,7 +186,7 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp

     let cookie = Cookie::build(COOKIE_NAME, jwt)
         .path(admin_path())
-        .max_age(rocket::time::Duration::minutes(20))
+        .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
         .same_site(SameSite::Strict)
         .http_only(true)
         .finish();
@@ -201,6 +203,19 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
 fn _validate_token(token: &str) -> bool {
     match CONFIG.admin_token().as_ref() {
         None => false,
+        Some(t) if t.starts_with("$argon2") => {
+            use argon2::password_hash::PasswordVerifier;
+            match argon2::password_hash::PasswordHash::new(t) {
+                Ok(h) => {
+                    // NOTE: hash params from `ADMIN_TOKEN` are used instead of what is configured in the `Argon2` instance.
+                    argon2::Argon2::default().verify_password(token.trim().as_ref(), &h).is_ok()
+                }
+                Err(e) => {
+                    error!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: {e}");
+                    false
+                }
+            }
+        }
         Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
     }
 }
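Editor's note — the new `$argon2` branch accepts a PHC-formatted hash in `ADMIN_TOKEN`. As a minimal standalone sketch of the same verification (assuming the same `argon2` crate the hunk above introduces; the PHC literal below is illustrative, not a real hash):

```rust
use argon2::{
    password_hash::{PasswordHash, PasswordVerifier},
    Argon2,
};

// Mirrors the `Some(t) if t.starts_with("$argon2")` arm above: parse the PHC
// string, then verify the supplied token against it. The cost parameters
// embedded in the PHC string drive the verification.
fn verify_admin_token(stored_phc: &str, supplied: &str) -> bool {
    match PasswordHash::new(stored_phc) {
        Ok(parsed) => Argon2::default().verify_password(supplied.trim().as_bytes(), &parsed).is_ok(),
        Err(_) => false, // invalid PHC string; the server logs this case
    }
}

fn main() {
    // Illustrative PHC string only; generate a real one with an Argon2 tool.
    let phc = "$argon2id$v=19$m=65540,t=3,p=4$MFRoaXNJc0FTYWx0$TUZkcbIstVbxk88ykyWHWA";
    println!("match: {}", verify_admin_token(phc, "my admin token"));
}
```

With this made-up hash the program prints `match: false`; a PHC string generated from the actual token would verify as `true`.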
@@ -208,34 +223,16 @@ fn _validate_token(token: &str) -> bool {
 #[derive(Serialize)]
 struct AdminTemplateData {
     page_content: String,
-    version: Option<&'static str>,
     page_data: Option<Value>,
-    config: Value,
-    can_backup: bool,
     logged_in: bool,
     urlpath: String,
 }

 impl AdminTemplateData {
-    fn new() -> Self {
-        Self {
-            page_content: String::from("admin/settings"),
-            version: VERSION,
-            config: CONFIG.prepare_json(),
-            can_backup: *CAN_BACKUP,
-            logged_in: true,
-            urlpath: CONFIG.domain_path(),
-            page_data: None,
-        }
-    }
-
-    fn with_data(page_content: &str, page_data: Value) -> Self {
+    fn new(page_content: &str, page_data: Value) -> Self {
         Self {
             page_content: String::from(page_content),
-            version: VERSION,
             page_data: Some(page_data),
-            config: CONFIG.prepare_json(),
-            can_backup: *CAN_BACKUP,
             logged_in: true,
             urlpath: CONFIG.domain_path(),
         }
@@ -247,7 +244,11 @@ impl AdminTemplateData {
 }

 fn render_admin_page() -> ApiResult<Html<String>> {
-    let text = AdminTemplateData::new().render()?;
+    let settings_json = json!({
+        "config": CONFIG.prepare_json(),
+        "can_backup": *CAN_BACKUP,
+    });
+    let text = AdminTemplateData::new("admin/settings", settings_json).render()?;
     Ok(Html(text))
 }

@@ -256,6 +257,11 @@ fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
     render_admin_page()
 }

+#[get("/", rank = 2)]
+fn admin_page_login() -> ApiResult<Html<String>> {
+    render_admin_login(None, None)
+}
+
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct InviteData {
@@ -314,8 +320,9 @@ fn logout(cookies: &CookieJar<'_>) -> Redirect {

 #[get("/users")]
 async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
-    let mut users_json = Vec::new();
-    for u in User::get_all(&mut conn).await {
+    let users = User::get_all(&mut conn).await;
+    let mut users_json = Vec::with_capacity(users.len());
+    for u in users {
         let mut usr = u.to_json(&mut conn).await;
         usr["UserEnabled"] = json!(u.enabled);
         usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
@@ -327,8 +334,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {

 #[get("/users/overview")]
 async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
-    let mut users_json = Vec::new();
-    for u in User::get_all(&mut conn).await {
+    let users = User::get_all(&mut conn).await;
+    let mut users_json = Vec::with_capacity(users.len());
+    for u in users {
         let mut usr = u.to_json(&mut conn).await;
         usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
         usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
@@ -342,13 +350,25 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
         users_json.push(usr);
     }

-    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
+    let text = AdminTemplateData::new("admin/users", json!(users_json)).render()?;
     Ok(Html(text))
 }

+#[get("/users/by-mail/<mail>")]
+async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
+    if let Some(u) = User::find_by_mail(mail, &mut conn).await {
+        let mut usr = u.to_json(&mut conn).await;
+        usr["UserEnabled"] = json!(u.enabled);
+        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        Ok(Json(usr))
+    } else {
+        err_code!("User doesn't exist", Status::NotFound.code);
+    }
+}
+
 #[get("/users/<uuid>")]
-async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
-    let u = get_user_or_404(&uuid, &mut conn).await?;
+async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
+    let u = get_user_or_404(uuid, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
     usr["UserEnabled"] = json!(u.enabled);
     usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
@@ -356,21 +376,21 @@ async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
 }

 #[post("/users/<uuid>/delete")]
-async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: ClientIp) -> EmptyResult {
-    let user = get_user_or_404(&uuid, &mut conn).await?;
+async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let user = get_user_or_404(uuid, &mut conn).await?;

     // Get the user_org records before deleting the actual user
-    let user_orgs = UserOrganization::find_any_state_by_user(&uuid, &mut conn).await;
+    let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
     let res = user.delete(&mut conn).await;

     for user_org in user_orgs {
         log_event(
             EventType::OrganizationUserRemoved as i32,
             &user_org.uuid,
-            user_org.org_uuid,
+            &user_org.org_uuid,
             String::from(ACTING_ADMIN_USER),
             14, // Use UnknownBrowser type
-            &ip.ip,
+            &token.ip.ip,
             &mut conn,
         )
         .await;
@@ -380,8 +400,20 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
 }

 #[post("/users/<uuid>/deauth")]
-async fn deauth_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
+
+    nt.send_logout(&user, None).await;
+
+    if CONFIG.push_enabled() {
+        for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
+            match unregister_push_device(device.uuid).await {
+                Ok(r) => r,
+                Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
+            };
+        }
+    }
+
     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();

@@ -389,31 +421,53 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
 }

 #[post("/users/<uuid>/disable")]
-async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();
     user.enabled = false;

-    user.save(&mut conn).await
+    let save_result = user.save(&mut conn).await;
+
+    nt.send_logout(&user, None).await;
+
+    save_result
 }

 #[post("/users/<uuid>/enable")]
-async fn enable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
     user.enabled = true;

     user.save(&mut conn).await
 }

 #[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let mut user = get_user_or_404(&uuid, &mut conn).await?;
+async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let mut user = get_user_or_404(uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }

+#[post("/users/<uuid>/invite/resend")]
+async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
+        // TODO: replace this with a user.status check once that is available (PR#3397)
+        if !user.password_hash.is_empty() {
+            err_code!("User already accepted invitation", Status::BadRequest.code);
+        }
+
+        if CONFIG.mail_enabled() {
+            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
+        } else {
+            Ok(())
+        }
+    } else {
+        err_code!("User doesn't exist", Status::NotFound.code);
+    }
+}
+
 #[derive(Deserialize, Debug)]
 struct UserOrgTypeData {
     user_type: NumberOrString,
@@ -422,12 +476,7 @@ struct UserOrgTypeData {
 }

 #[post("/users/org_type", data = "<data>")]
-async fn update_user_org_type(
-    data: Json<UserOrgTypeData>,
-    _token: AdminToken,
-    mut conn: DbConn,
-    ip: ClientIp,
-) -> EmptyResult {
+async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     let data: UserOrgTypeData = data.into_inner();

     let mut user_to_edit =
@@ -442,7 +491,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     };

     if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
-        // Removing owner permmission, check that there is at least one other confirmed owner
+        // Removing owner permission, check that there is at least one other confirmed owner
         if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
             err!("Can't change the type of the last owner")
         }
@@ -465,10 +514,10 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     log_event(
         EventType::OrganizationUserUpdated as i32,
         &user_to_edit.uuid,
-        data.org_uuid,
+        &data.org_uuid,
         String::from(ACTING_ADMIN_USER),
         14, // Use UnknownBrowser type
-        &ip.ip,
+        &token.ip.ip,
         &mut conn,
     )
     .await;
@@ -484,23 +533,27 @@ async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {

 #[get("/organizations/overview")]
 async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
-    let mut organizations_json = Vec::new();
-    for o in Organization::get_all(&mut conn).await {
+    let organizations = Organization::get_all(&mut conn).await;
+    let mut organizations_json = Vec::with_capacity(organizations.len());
+    for o in organizations {
         let mut org = o.to_json();
         org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
         org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
         org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
         org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
         organizations_json.push(org);
     }

-    let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
+    let text = AdminTemplateData::new("admin/organizations", json!(organizations_json)).render()?;
     Ok(Html(text))
 }

 #[post("/organizations/<uuid>/delete")]
-async fn delete_organization(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-    let org = Organization::find_by_uuid(&uuid, &mut conn).await.map_res("Organization doesn't exist")?;
+async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
     org.delete(&mut conn).await
 }

@@ -519,10 +572,20 @@ struct GitCommit {
     sha: String,
 }

-async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    let github_api = get_reqwest_client();
-
-    Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
+#[derive(Deserialize)]
+struct TimeApi {
+    year: u16,
+    month: u8,
+    day: u8,
+    hour: u8,
+    minute: u8,
+    seconds: u8,
+}
+
+async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
+    let json_api = get_reqwest_client();
+
+    Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
 }

 async fn has_http_access() -> bool {
@@ -542,14 +605,13 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
     // If the HTTP check failed, do not even attempt to check for new versions since we were not able to connect to github.com anyway.
     if has_http_access {
         (
-            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
+            match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
                 .await
             {
                 Ok(r) => r.tag_name,
                 _ => "-".to_string(),
             },
-            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
-            {
+            match get_json_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await {
                 Ok(mut c) => {
                     c.sha.truncate(8);
                     c.sha
@@ -561,7 +623,7 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
             if running_within_docker {
                 "-".to_string()
             } else {
-                match get_github_api::<GitRelease>(
+                match get_json_api::<GitRelease>(
                     "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
                 )
                 .await
@@ -576,6 +638,24 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
     }
 }

+async fn get_ntp_time(has_http_access: bool) -> String {
+    if has_http_access {
+        if let Ok(ntp_time) = get_json_api::<TimeApi>("https://www.timeapi.io/api/Time/current/zone?timeZone=UTC").await
+        {
+            return format!(
+                "{year}-{month:02}-{day:02} {hour:02}:{minute:02}:{seconds:02} UTC",
+                year = ntp_time.year,
+                month = ntp_time.month,
+                day = ntp_time.day,
+                hour = ntp_time.hour,
+                minute = ntp_time.minute,
+                seconds = ntp_time.seconds
+            );
+        }
+    }
+    String::from("Unable to fetch NTP time.")
+}

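Editor's note — a quick, self-contained check of how the new `TimeApi` struct deserializes (the payload below is made up; the real timeapi.io response carries extra fields, which serde ignores by default):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct TimeApi {
    year: u16,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    seconds: u8,
}

fn main() {
    // Same field names as the struct added in the hunk above.
    let body = r#"{"year":2023,"month":6,"day":25,"hour":12,"minute":30,"seconds":5}"#;
    let t: TimeApi = serde_json::from_str(body).expect("payload matches the struct");
    // Reproduces the zero-padded formatting used by get_ntp_time().
    println!("{:04}-{:02}-{:02} {:02}:{:02}:{:02} UTC", t.year, t.month, t.day, t.hour, t.minute, t.seconds);
}
```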
#[get("/diagnostics")]
|
||||
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult<Html<String>> {
|
||||
use chrono::prelude::*;
|
||||
@@ -604,7 +684,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||
// Check if we are able to resolve DNS entries
|
||||
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
|
||||
Ok(Some(a)) => a.ip().to_string(),
|
||||
_ => "Could not resolve domain name.".to_string(),
|
||||
_ => "Unable to resolve domain name.".to_string(),
|
||||
};
|
||||
|
||||
let (latest_release, latest_commit, latest_web_build) =
|
||||
@@ -617,13 +697,14 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||
|
||||
let diagnostics_json = json!({
|
||||
"dns_resolved": dns_resolved,
|
||||
"current_release": VERSION,
|
||||
"latest_release": latest_release,
|
||||
"latest_commit": latest_commit,
|
||||
"web_vault_enabled": &CONFIG.web_vault_enabled(),
|
||||
"web_vault_version": web_vault_version.version,
|
||||
"web_vault_version": web_vault_version.version.trim_start_matches('v'),
|
||||
"latest_web_build": latest_web_build,
|
||||
"running_within_docker": running_within_docker,
|
||||
"docker_base_image": docker_base_image(),
|
||||
"docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
|
||||
"has_http_access": has_http_access,
|
||||
"ip_header_exists": &ip_header.0.is_some(),
|
||||
"ip_header_match": ip_header_name == CONFIG.ip_header(),
|
||||
@@ -634,11 +715,14 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||
"db_version": get_sql_server_version(&mut conn).await,
|
||||
"admin_url": format!("{}/diagnostics", admin_url()),
|
||||
"overrides": &CONFIG.get_overrides().join(", "),
|
||||
"host_arch": std::env::consts::ARCH,
|
||||
"host_os": std::env::consts::OS,
|
||||
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
|
||||
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
|
||||
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
|
||||
"ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
|
||||
});
|
||||
|
||||
let text = AdminTemplateData::with_data("admin/diagnostics", diagnostics_json).render()?;
|
||||
let text = AdminTemplateData::new("admin/diagnostics", diagnostics_json).render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
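Editor's note — the `trim_start_matches('v')` tweak simply normalizes tags such as `v2023.5.0` before display:

```rust
fn main() {
    // Strips any leading 'v' characters; the rest of the tag is untouched.
    assert_eq!("v2023.5.0".trim_start_matches('v'), "2023.5.0");
    assert_eq!("2023.5.0".trim_start_matches('v'), "2023.5.0"); // already bare
}
```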
@@ -668,36 +752,52 @@ async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
     }
 }

-pub struct AdminToken {}
+pub struct AdminToken {
+    ip: ClientIp,
+}

 #[rocket::async_trait]
 impl<'r> FromRequest<'r> for AdminToken {
     type Error = &'static str;

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        let ip = match ClientIp::from_request(request).await {
+            Outcome::Success(ip) => ip,
+            _ => err_handler!("Error getting Client IP"),
+        };
+
         if CONFIG.disable_admin_token() {
-            Outcome::Success(Self {})
+            Outcome::Success(Self {
+                ip,
+            })
         } else {
             let cookies = request.cookies();

             let access_token = match cookies.get(COOKIE_NAME) {
                 Some(cookie) => cookie.value(),
-                None => return Outcome::Failure((Status::Unauthorized, "Unauthorized")),
-            };
-
-            let ip = match ClientIp::from_request(request).await {
-                Outcome::Success(ip) => ip.ip,
-                _ => err_handler!("Error getting Client IP"),
+                None => {
+                    let requested_page =
+                        request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
+                    // When the requested page is empty it is `/admin`; in that case Forward, so the login page is rendered.
+                    // Otherwise return a 401 failure, which will be caught.
+                    if requested_page.is_empty() {
+                        return Outcome::Forward(Status::Unauthorized);
+                    } else {
+                        return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
+                    }
+                }
             };

             if decode_admin(access_token).is_err() {
                 // Remove admin cookie
                 cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
-                error!("Invalid or expired admin JWT. IP: {}.", ip);
+                error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
                 return Outcome::Failure((Status::Unauthorized, "Session expired"));
             }

-            Outcome::Success(Self {})
+            Outcome::Success(Self {
+                ip,
+            })
         }
     }
 }
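Editor's note — the `Outcome::Forward(Status::Unauthorized)` branch above works together with the new `admin_page_login` route registered at `rank = 2` earlier in this file: an unauthenticated request for the bare `/admin` path is forwarded rather than rejected, letting the lower-ranked route render the login page, while deeper admin paths still get a hard 401.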
src/api/core/accounts.rs
@@ -4,14 +4,20 @@ use serde_json::Value;

 use crate::{
     api::{
-        core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
+        core::log_user_event, register_push_device, unregister_push_device, EmptyResult, JsonResult, JsonUpcase,
+        Notify, NumberOrString, PasswordData, UpdateType,
     },
-    auth::{decode_delete, decode_invite, decode_verify_email, ClientIp, Headers},
+    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
     crypto,
     db::{models::*, DbConn},
     mail, CONFIG,
 };

+use rocket::{
+    http::Status,
+    request::{FromRequest, Outcome, Request},
+};
+
 pub fn routes() -> Vec<rocket::Route> {
     routes![
         register,
@@ -30,6 +36,7 @@ pub fn routes() -> Vec<rocket::Route> {
         post_verify_email_token,
         post_delete_recover,
         post_delete_recover_token,
+        post_device_token,
         delete_account,
         post_delete_account,
         revision_date,
@@ -39,6 +46,11 @@ pub fn routes() -> Vec<rocket::Route> {
         api_key,
         rotate_api_key,
         get_known_device,
+        get_known_device_from_path,
+        put_avatar,
+        put_device_token,
+        put_clear_device_token,
+        post_clear_device_token,
     ]
 }

@@ -48,6 +60,8 @@ pub struct RegisterData {
     Email: String,
     Kdf: Option<i32>,
     KdfIterations: Option<i32>,
+    KdfMemory: Option<i32>,
+    KdfParallelism: Option<i32>,
     Key: String,
     Keys: Option<KeysData>,
     MasterPasswordHash: String,
@@ -124,7 +138,7 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> JsonResult {
             err!("Registration email does not match invite email")
         }
     } else if Invitation::take(&email, &mut conn).await {
-        for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
+        for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
             user_org.status = UserOrgStatus::Accepted as i32;
             user_org.save(&mut conn).await?;
         }
@@ -152,16 +166,18 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> JsonResult {
     // Make sure we don't leave a lingering invitation.
     Invitation::take(&email, &mut conn).await;

-    if let Some(client_kdf_iter) = data.KdfIterations {
-        user.client_kdf_iter = client_kdf_iter;
-    }
-
     if let Some(client_kdf_type) = data.Kdf {
         user.client_kdf_type = client_kdf_type;
     }

-    user.set_password(&data.MasterPasswordHash, None);
-    user.akey = data.Key;
+    if let Some(client_kdf_iter) = data.KdfIterations {
+        user.client_kdf_iter = client_kdf_iter;
+    }
+
+    user.client_kdf_memory = data.KdfMemory;
+    user.client_kdf_parallelism = data.KdfParallelism;
+
+    user.set_password(&data.MasterPasswordHash, Some(data.Key), true, None);
     user.password_hint = password_hint;

     // Add extra fields if present
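Editor's note — `set_password` now takes four arguments at every call site in this diff. A rough standalone model of the shape those call sites imply (the struct and field updates below are simplified placeholders I made up for illustration, not the real implementation):

```rust
struct User {
    password_hash: String,
    akey: String,
    security_stamp: String,
}

impl User {
    // Shape inferred from the call sites in this diff:
    // set_password(password, new_key, reset_security_stamp, allow_next_route)
    fn set_password(
        &mut self,
        password: &str,
        new_key: Option<String>,
        reset_security_stamp: bool,
        _allow_next_route: Option<Vec<String>>,
    ) {
        self.password_hash = format!("<hash of {password}>"); // placeholder, not the real KDF
        if let Some(key) = new_key {
            // Replaces the separate `user.akey = data.Key;` assignments removed above.
            self.akey = key;
        }
        if reset_security_stamp {
            self.security_stamp = String::from("<new stamp>"); // placeholder
        }
    }
}

fn main() {
    let mut u = User { password_hash: String::new(), akey: String::new(), security_stamp: String::new() };
    u.set_password("hunter2", Some(String::from("enc-key")), true, None);
    println!("{} / {}", u.password_hash, u.akey);
}
```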
@@ -228,9 +244,35 @@ async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, mut conn:
     Ok(Json(user.to_json(&mut conn).await))
 }

+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct AvatarData {
+    AvatarColor: Option<String>,
+}
+
+#[put("/accounts/avatar", data = "<data>")]
+async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: AvatarData = data.into_inner().data;
+
+    // It looks like only the 6-digit hex color format is supported;
+    // the short form will not show that color.
+    // Check and force 7 chars, including the #.
+    if let Some(color) = &data.AvatarColor {
+        if color.len() != 7 {
+            err!("The field AvatarColor must be a HTML/Hex color code with a length of 7 characters")
+        }
+    }
+
+    let mut user = headers.user;
+    user.avatar_color = data.AvatarColor;
+
+    user.save(&mut conn).await?;
+    Ok(Json(user.to_json(&mut conn).await))
+}
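Editor's note — the route only validates length. If one wanted a stricter check, a small helper along these lines (hypothetical, not part of the diff) would also reject strings like `#zzzzzz`:

```rust
// Hypothetical stricter validation than the length check above.
fn is_hex_color(s: &str) -> bool {
    s.len() == 7 && s.starts_with('#') && s[1..].chars().all(|c| c.is_ascii_hexdigit())
}

fn main() {
    assert!(is_hex_color("#a1b2c3"));
    assert!(!is_hex_color("#zzzzzz")); // correct length, but not hex digits
    assert!(!is_hex_color("#abc")); // short form rejected, matching the route's intent
}
```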

 #[get("/users/<uuid>/public-key")]
-async fn get_public_keys(uuid: String, _headers: Headers, mut conn: DbConn) -> JsonResult {
-    let user = match User::find_by_uuid(&uuid, &mut conn).await {
+async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
+    let user = match User::find_by_uuid(uuid, &mut conn).await {
         Some(user) => user,
         None => err!("User doesn't exist"),
     };
@@ -274,7 +316,7 @@ async fn post_password(
     data: JsonUpcase<ChangePassData>,
     headers: Headers,
     mut conn: DbConn,
-    ip: ClientIp,
+    nt: Notify<'_>,
 ) -> EmptyResult {
     let data: ChangePassData = data.into_inner().data;
     let mut user = headers.user;
@@ -286,14 +328,24 @@ async fn post_password(
     user.password_hint = clean_password_hint(&data.MasterPasswordHint);
     enforce_password_hint_setting(&user.password_hint)?;

-    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn)
+        .await;

     user.set_password(
         &data.NewMasterPasswordHash,
+        Some(data.Key),
+        true,
         Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
     );
-    user.akey = data.Key;
-    user.save(&mut conn).await
+
+    let save_result = user.save(&mut conn).await;
+
+    // Prevent logging out the client from which the user requested this endpoint;
+    // logging the user out would cause issues on the client side.
+    // Passing the device uuid prevents this.
+    nt.send_logout(&user, Some(headers.device.uuid)).await;
+
+    save_result
 }

@@ -301,6 +353,8 @@ async fn post_password(
 #[derive(Deserialize)]
 struct ChangeKdfData {
     Kdf: i32,
     KdfIterations: i32,
+    KdfMemory: Option<i32>,
+    KdfParallelism: Option<i32>,

     MasterPasswordHash: String,
     NewMasterPasswordHash: String,
@@ -308,7 +362,7 @@ struct ChangeKdfData {
 }

 #[post("/accounts/kdf", data = "<data>")]
-async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
     let data: ChangeKdfData = data.into_inner().data;
     let mut user = headers.user;

@@ -316,11 +370,42 @@ async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
         err!("Invalid password")
     }

+    if data.Kdf == UserKdfType::Pbkdf2 as i32 && data.KdfIterations < 100_000 {
+        err!("PBKDF2 KDF iterations must be at least 100000.")
+    }
+
+    if data.Kdf == UserKdfType::Argon2id as i32 {
+        if data.KdfIterations < 1 {
+            err!("Argon2 KDF iterations must be at least 1.")
+        }
+        if let Some(m) = data.KdfMemory {
+            if !(15..=1024).contains(&m) {
+                err!("Argon2 memory must be between 15 MB and 1024 MB.")
+            }
+            user.client_kdf_memory = data.KdfMemory;
+        } else {
+            err!("Argon2 memory parameter is required.")
+        }
+        if let Some(p) = data.KdfParallelism {
+            if !(1..=16).contains(&p) {
+                err!("Argon2 parallelism must be between 1 and 16.")
+            }
+            user.client_kdf_parallelism = data.KdfParallelism;
+        } else {
+            err!("Argon2 parallelism parameter is required.")
+        }
+    } else {
+        user.client_kdf_memory = None;
+        user.client_kdf_parallelism = None;
+    }
     user.client_kdf_iter = data.KdfIterations;
     user.client_kdf_type = data.Kdf;
-    user.set_password(&data.NewMasterPasswordHash, None);
-    user.akey = data.Key;
-    user.save(&mut conn).await
+    user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);
+    let save_result = user.save(&mut conn).await;
+
+    nt.send_logout(&user, Some(headers.device.uuid)).await;
+
+    save_result
 }
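Editor's note — the memory bound above is expressed in MB, while the `argon2` crate (used elsewhere in this PR for `ADMIN_TOKEN`) takes its memory cost in KiB. A sketch of building a KDF instance from values that pass these checks; the MB-to-KiB conversion is my assumption about how the validated settings map onto the crate:

```rust
use argon2::{Algorithm, Argon2, Params, Version};

fn main() {
    // Values that satisfy the route's checks: iterations >= 1,
    // memory in 15..=1024 MB, parallelism in 1..=16.
    let (iterations, memory_mb, parallelism) = (3u32, 64u32, 4u32);

    // Params::new takes the memory cost in KiB, so 64 MB becomes 65536 KiB.
    let params = Params::new(memory_mb * 1024, iterations, parallelism, None)
        .expect("parameters within the crate's accepted ranges");
    let _kdf = Argon2::new(Algorithm::Argon2id, Version::V0x13, params);
    println!("Argon2id configured: t={iterations}, m={} KiB, p={parallelism}", memory_mb * 1024);
}
```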
|
||||
 #[derive(Deserialize)]
@@ -343,19 +428,19 @@ struct KeyData {
 }

 #[post("/accounts/key", data = "<data>")]
-async fn post_rotatekey(
-    data: JsonUpcase<KeyData>,
-    headers: Headers,
-    mut conn: DbConn,
-    ip: ClientIp,
-    nt: Notify<'_>,
-) -> EmptyResult {
+async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
     let data: KeyData = data.into_inner().data;

     if !headers.user.check_valid_password(&data.MasterPasswordHash) {
         err!("Invalid password")
     }

+    // Validate the import before continuing.
+    // Bitwarden does not process the import if any single item is invalid.
+    // Since we check the size of the encrypted note, we need to do that here to pre-validate it.
+    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
+    Cipher::validate_notes(&data.Ciphers)?;
+
     let user_uuid = &headers.user.uuid;

     // Update folder data
@@ -388,7 +473,8 @@ async fn post_rotatekey(

         // Prevent triggering cipher updates via WebSockets by setting UpdateType::None
         // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
-        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
+        // We force the users to log out after the user has been saved to try and prevent these issues.
+        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
             .await?
     }

@@ -399,11 +485,23 @@ async fn post_rotatekey(
     user.private_key = Some(data.PrivateKey);
     user.reset_security_stamp();

-    user.save(&mut conn).await
+    let save_result = user.save(&mut conn).await;
+
+    // Prevent logging out the client where the user requested this endpoint from.
+    // If you do log out the user it will cause issues at the client side.
+    // Adding the device uuid will prevent this.
+    nt.send_logout(&user, Some(headers.device.uuid)).await;
+
+    save_result
 }

 #[post("/accounts/security-stamp", data = "<data>")]
-async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_sstamp(
+    data: JsonUpcase<PasswordData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let mut user = headers.user;

@@ -413,7 +511,11 @@ async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, mut conn:

     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();
-    user.save(&mut conn).await
+    let save_result = user.save(&mut conn).await;
+
+    nt.send_logout(&user, None).await;
+
+    save_result
 }

 #[derive(Deserialize)]
@@ -465,7 +567,12 @@ struct ChangeEmailData {
 }

 #[post("/accounts/email", data = "<data>")]
-async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_email(
+    data: JsonUpcase<ChangeEmailData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> EmptyResult {
     let data: ChangeEmailData = data.into_inner().data;
     let mut user = headers.user;

@@ -505,10 +612,13 @@ async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, mut con
     user.email_new = None;
     user.email_new_token = None;

-    user.set_password(&data.NewMasterPasswordHash, None);
-    user.akey = data.Key;
+    user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);

-    user.save(&mut conn).await
+    let save_result = user.save(&mut conn).await;
+
+    nt.send_logout(&user, None).await;
+
+    save_result
 }

 #[post("/accounts/verify-email")]
@@ -629,9 +739,9 @@ async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut co
 }

 #[get("/accounts/revision-date")]
-fn revision_date(headers: Headers) -> String {
+fn revision_date(headers: Headers) -> JsonResult {
     let revision_date = headers.user.updated_at.timestamp_millis();
-    revision_date.to_string()
+    Ok(Json(json!(revision_date)))
 }

 #[derive(Deserialize)]
@@ -674,7 +784,7 @@ async fn password_hint(data: JsonUpcase<PasswordHintData>, mut conn: DbConn) ->
         mail::send_password_hint(email, hint).await?;
         Ok(())
     } else if let Some(hint) = hint {
-        err!(format!("Your password hint is: {}", hint));
+        err!(format!("Your password hint is: {hint}"));
     } else {
         err!(NO_HINT);
     }
@@ -696,15 +806,19 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
 pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
     let data: PreloginData = data.into_inner().data;

-    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &mut conn).await {
-        Some(user) => (user.client_kdf_type, user.client_kdf_iter),
-        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
+    let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.Email, &mut conn).await {
+        Some(user) => (user.client_kdf_type, user.client_kdf_iter, user.client_kdf_memory, user.client_kdf_parallelism),
+        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None),
     };

-    Json(json!({
+    let result = json!({
         "Kdf": kdf_type,
-        "KdfIterations": kdf_iter
-    }))
+        "KdfIterations": kdf_iter,
+        "KdfMemory": kdf_mem,
+        "KdfParallelism": kdf_para,
+    });
+
+    Json(result)
 }
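For illustration, and assuming the upstream KDF enum values (PBKDF2 = 0, Argon2id = 1), a prelogin reply for an Argon2id account would now carry all four fields:

    let example = serde_json::json!({
        "Kdf": 1,              // Argon2id (assumed enum value, matching upstream)
        "KdfIterations": 3,
        "KdfMemory": 64,       // MB
        "KdfParallelism": 4,
    });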
 // https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
@@ -732,6 +846,8 @@ async fn _api_key(
     headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
+    use crate::util::format_date;
+
     let data: SecretVerificationRequest = data.into_inner().data;
     let mut user = headers.user;

@@ -746,6 +862,7 @@ async fn _api_key(

     Ok(Json(json!({
         "ApiKey": user.api_key,
+        "RevisionDate": format_date(&user.updated_at),
         "Object": "apiKey",
     })))
 }
@@ -760,15 +877,122 @@ async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: He
     _api_key(data, true, headers, conn).await
 }

+// This variant is deprecated: https://github.com/bitwarden/server/pull/2682
 #[get("/devices/knowndevice/<email>/<uuid>")]
-async fn get_known_device(email: String, uuid: String, mut conn: DbConn) -> String {
+async fn get_known_device_from_path(email: &str, uuid: &str, mut conn: DbConn) -> JsonResult {
     // This endpoint doesn't have auth header
-    if let Some(user) = User::find_by_mail(&email, &mut conn).await {
-        match Device::find_by_uuid_and_user(&uuid, &user.uuid, &mut conn).await {
-            Some(_) => String::from("true"),
-            _ => String::from("false"),
+    let mut result = false;
+    if let Some(user) = User::find_by_mail(email, &mut conn).await {
+        result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn).await.is_some();
     }
+    Ok(Json(json!(result)))
+}
+
+#[get("/devices/knowndevice")]
+async fn get_known_device(device: KnownDevice, conn: DbConn) -> JsonResult {
+    get_known_device_from_path(&device.email, &device.uuid, conn).await
+}
+
+struct KnownDevice {
+    email: String,
+    uuid: String,
+}
+
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for KnownDevice {
+    type Error = &'static str;
+
+    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
+            let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
+                Ok(bytes) => bytes,
+                Err(_) => {
+                    return Outcome::Failure((
+                        Status::BadRequest,
+                        "X-Request-Email value failed to decode as base64url",
+                    ));
+                }
+            };
+            match String::from_utf8(email_bytes) {
+                Ok(email) => email,
+                Err(_) => {
+                    return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
+                }
+            }
         } else {
-            String::from("false")
+            return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
         };
+
+        let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
+            uuid.to_string()
+        } else {
+            return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
+        };
+
+        Outcome::Success(KnownDevice {
+            email,
+            uuid,
+        })
+    }
+}
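A client calling the new header-based endpoint must encode the email itself. A small sketch of building the two headers, using the same `data_encoding` crate the extractor above relies on:

    use data_encoding::BASE64URL_NOPAD;

    fn known_device_headers(email: &str, device_uuid: &str) -> [(&'static str, String); 2] {
        [
            // The extractor decodes this with BASE64URL_NOPAD, so encode it the same way.
            ("X-Request-Email", BASE64URL_NOPAD.encode(email.as_bytes())),
            ("X-Device-Identifier", device_uuid.to_string()),
        ]
    }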

+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct PushToken {
+    PushToken: String,
+}
+
+#[post("/devices/identifier/<uuid>/token", data = "<data>")]
+async fn post_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
+    put_device_token(uuid, data, headers, conn).await
+}
+
+#[put("/devices/identifier/<uuid>/token", data = "<data>")]
+async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.push_enabled() {
+        return Ok(());
+    }
+
+    let data = data.into_inner().data;
+    let token = data.PushToken;
+    let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
+        Some(device) => device,
+        None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
+    };
+    device.push_token = Some(token);
+    if device.push_uuid.is_none() {
+        device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
+    }
+    if let Err(e) = device.save(&mut conn).await {
+        err!(format!("An error occurred while trying to save the device push token: {e}"));
+    }
+    if let Err(e) = register_push_device(headers.user.uuid, device).await {
+        err!(format!("An error occurred while registering a device: {e}"));
+    }
+
+    Ok(())
+}
+
+#[put("/devices/identifier/<uuid>/clear-token")]
+async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
+    // This only clears the push token
+    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
+    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
+    // This is somehow not implemented in any app; added it in case it is required
+    if !CONFIG.push_enabled() {
+        return Ok(());
+    }
+
+    if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
+        Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
+        unregister_push_device(device.uuid).await?;
+    }
+
+    Ok(())
+}
+
+// On the upstream server, both PUT and POST are declared. Implementing the POST method in case it is useful somewhere
+#[post("/devices/identifier/<uuid>/clear-token")]
+async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
+    put_clear_device_token(uuid, conn).await
+}
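To exercise the new push-token endpoint, a client PUTs a JSON body with a single PushToken field. A hedged sketch using `reqwest` (illustrative only; the `/api` prefix is an assumption about the default route mount, and the helper is not part of the codebase):

    async fn send_push_token(base: &str, device_id: &str, access_token: &str, push_token: &str) -> reqwest::Result<()> {
        reqwest::Client::new()
            .put(format!("{base}/api/devices/identifier/{device_id}/token"))
            .bearer_auth(access_token)
            .json(&serde_json::json!({ "PushToken": push_token }))
            .send()
            .await?
            .error_for_status()?;
        Ok(())
    }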
[File diff suppressed because it is too large]
src/api/core/emergency_access.rs
@@ -71,10 +71,10 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
 }

 #[get("/emergency-access/<emer_id>")]
-async fn get_emergency_access(emer_id: String, mut conn: DbConn) -> JsonResult {
+async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

-    match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
         None => err!("Emergency access not valid."),
     }
@@ -93,17 +93,13 @@ struct EmergencyAccessUpdateData {
 }

 #[put("/emergency-access/<emer_id>", data = "<data>")]
-async fn put_emergency_access(
-    emer_id: String,
-    data: JsonUpcase<EmergencyAccessUpdateData>,
-    conn: DbConn,
-) -> JsonResult {
+async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
     post_emergency_access(emer_id, data, conn).await
 }

 #[post("/emergency-access/<emer_id>", data = "<data>")]
 async fn post_emergency_access(
-    emer_id: String,
+    emer_id: &str,
     data: JsonUpcase<EmergencyAccessUpdateData>,
     mut conn: DbConn,
 ) -> JsonResult {
@@ -111,7 +107,7 @@ async fn post_emergency_access(

     let data: EmergencyAccessUpdateData = data.into_inner().data;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emergency_access) => emergency_access,
         None => err!("Emergency access not valid."),
     };
@@ -123,7 +119,9 @@ async fn post_emergency_access(

     emergency_access.atype = new_type;
     emergency_access.wait_time_days = data.WaitTimeDays;
+    if data.KeyEncrypted.is_some() {
         emergency_access.key_encrypted = data.KeyEncrypted;
+    }

     emergency_access.save(&mut conn).await?;
     Ok(Json(emergency_access.to_json()))
@@ -134,12 +132,12 @@ async fn post_emergency_access(
 // region delete

 #[delete("/emergency-access/<emer_id>")]
-async fn delete_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_allowed()?;

     let grantor_user = headers.user;

-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => {
             if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
                 err!("Emergency access not valid.")
@@ -153,7 +151,7 @@ async fn delete_emergency_access(emer_id: String, headers: Headers, mut conn: Db
 }

 #[post("/emergency-access/<emer_id>/delete")]
-async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult {
     delete_emergency_access(emer_id, headers, conn).await
 }

@@ -241,7 +239,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
     } else {
         // Automatically mark user as accepted if no email invites
         match User::find_by_mail(&email, &mut conn).await {
-            Some(user) => match accept_invite_process(user.uuid, &mut new_emergency_access, &email, &mut conn).await {
+            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
                 Ok(v) => v,
                 Err(e) => err!(e.to_string()),
             },
@@ -253,10 +251,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
 }

 #[post("/emergency-access/<emer_id>/reinvite")]
-async fn resend_invite(emer_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_allowed()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -297,7 +295,7 @@ async fn resend_invite(emer_id: String, headers: Headers, mut conn: DbConn) -> E
     }

     // Automatically mark user as accepted if no email invites
-    match accept_invite_process(grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
+    match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
         Ok(v) => v,
         Err(e) => err!(e.to_string()),
     }
@@ -313,12 +311,7 @@ struct AcceptData {
 }

 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
-async fn accept_invite(
-    emer_id: String,
-    data: JsonUpcase<AcceptData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> EmptyResult {
+async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_allowed()?;

     let data: AcceptData = data.into_inner().data;
@@ -339,7 +332,7 @@ async fn accept_invite(
         None => err!("Invited user not found"),
     };

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -354,7 +347,7 @@ async fn accept_invite(
         && grantor_user.name == claims.grantor_name
         && grantor_user.email == claims.grantor_email
     {
-        match accept_invite_process(grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
+        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
             Ok(v) => v,
             Err(e) => err!(e.to_string()),
         }
@@ -370,7 +363,7 @@ async fn accept_invite(
 }

 async fn accept_invite_process(
-    grantee_uuid: String,
+    grantee_uuid: &str,
     emergency_access: &mut EmergencyAccess,
     grantee_email: &str,
     conn: &mut DbConn,
@@ -384,7 +377,7 @@ async fn accept_invite_process(
     }

     emergency_access.status = EmergencyAccessStatus::Accepted as i32;
-    emergency_access.grantee_uuid = Some(grantee_uuid);
+    emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
     emergency_access.email = None;
     emergency_access.save(conn).await
 }
@@ -397,7 +390,7 @@ struct ConfirmData {

 #[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
 async fn confirm_emergency_access(
-    emer_id: String,
+    emer_id: &str,
     data: JsonUpcase<ConfirmData>,
     headers: Headers,
     mut conn: DbConn,
@@ -408,7 +401,7 @@ async fn confirm_emergency_access(
     let data: ConfirmData = data.into_inner().data;
     let key = data.Key;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -450,11 +443,11 @@ async fn confirm_emergency_access(
 // region access emergency access

 #[post("/emergency-access/<emer_id>/initiate")]
-async fn initiate_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let initiating_user = headers.user;
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -490,10 +483,10 @@ async fn initiate_emergency_access(emer_id: String, headers: Headers, mut conn:
 }

 #[post("/emergency-access/<emer_id>/approve")]
-async fn approve_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -528,10 +521,10 @@ async fn approve_emergency_access(emer_id: String, headers: Headers, mut conn: D
 }

 #[post("/emergency-access/<emer_id>/reject")]
-async fn reject_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -571,26 +564,33 @@ async fn reject_emergency_access(emer_id: String, headers: Headers, mut conn: Db
 // region action

 #[post("/emergency-access/<emer_id>/view")]
-async fn view_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if !is_valid_request(&emergency_access, headers.user.uuid, EmergencyAccessType::View) {
+    if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
         err!("Emergency access not valid.")
     }

     let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await;
-    let cipher_sync_data =
-        CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, CipherSyncType::User, &mut conn).await;
+    let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, CipherSyncType::User, &mut conn).await;

-    let mut ciphers_json = Vec::new();
+    let mut ciphers_json = Vec::with_capacity(ciphers.len());
     for c in ciphers {
-        ciphers_json
-            .push(c.to_json(&headers.host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &mut conn).await);
+        ciphers_json.push(
+            c.to_json(
+                &headers.host,
+                &emergency_access.grantor_uuid,
+                Some(&cipher_sync_data),
+                CipherSyncType::User,
+                &mut conn,
+            )
+            .await,
+        );
     }

     Ok(Json(json!({
@@ -601,16 +601,16 @@ async fn view_emergency_access(emer_id: String, headers: Headers, mut conn: DbCo
 }

 #[post("/emergency-access/<emer_id>/takeover")]
-async fn takeover_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
+    if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
     }

@@ -619,12 +619,16 @@ async fn takeover_emergency_access(emer_id: String, headers: Headers, mut conn:
         None => err!("Grantor user not found."),
     };

-    Ok(Json(json!({
+    let result = json!({
         "Kdf": grantor_user.client_kdf_type,
         "KdfIterations": grantor_user.client_kdf_iter,
+        "KdfMemory": grantor_user.client_kdf_memory,
+        "KdfParallelism": grantor_user.client_kdf_parallelism,
         "KeyEncrypted": &emergency_access.key_encrypted,
         "Object": "emergencyAccessTakeover",
-    })))
+    });
+
+    Ok(Json(result))
 }

 #[derive(Deserialize)]
@@ -636,7 +640,7 @@ struct EmergencyAccessPasswordData {

 #[post("/emergency-access/<emer_id>/password", data = "<data>")]
 async fn password_emergency_access(
-    emer_id: String,
+    emer_id: &str,
     data: JsonUpcase<EmergencyAccessPasswordData>,
     headers: Headers,
     mut conn: DbConn,
@@ -645,15 +649,15 @@ async fn password_emergency_access(

     let data: EmergencyAccessPasswordData = data.into_inner().data;
     let new_master_password_hash = &data.NewMasterPasswordHash;
-    let key = data.Key;
+    //let key = &data.Key;

     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
+    if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
     }

@@ -663,8 +667,7 @@ async fn password_emergency_access(
     };

     // change grantor_user password
-    grantor_user.set_password(new_master_password_hash, None);
-    grantor_user.akey = key;
+    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
     grantor_user.save(&mut conn).await?;

     // Disable TwoFactor providers since they will otherwise block logins
@@ -682,14 +685,14 @@ async fn password_emergency_access(
 // endregion

 #[get("/emergency-access/<emer_id>/policies")]
-async fn policies_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
+    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
+    if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
     }

@@ -710,10 +713,11 @@ async fn policies_emergency_access(emer_id: String, headers: Headers, mut conn:

 fn is_valid_request(
     emergency_access: &EmergencyAccess,
-    requesting_user_uuid: String,
+    requesting_user_uuid: &str,
     requested_access_type: EmergencyAccessType,
 ) -> bool {
-    emergency_access.grantee_uuid == Some(requesting_user_uuid)
+    emergency_access.grantee_uuid.is_some()
+        && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid
         && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
         && emergency_access.atype == requested_access_type as i32
 }
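A side note on the is_valid_request rewrite above: taking &str avoids forcing callers to clone the requesting user's UUID, and the is_some()/unwrap() pair could equally be written without unwrap using Option::as_deref, which borrows an Option<String> as an Option<&str>:

    // Equivalent, allocation-free comparison:
    emergency_access.grantee_uuid.as_deref() == Some(requesting_user_uuid)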
src/api/core/events.rs
@@ -6,7 +6,7 @@ use serde_json::Value;

 use crate::{
     api::{EmptyResult, JsonResult, JsonUpcaseVec},
-    auth::{AdminHeaders, ClientIp, Headers},
+    auth::{AdminHeaders, Headers},
     db::{
         models::{Cipher, Event, UserOrganization},
         DbConn, DbPool,
@@ -32,7 +32,7 @@ struct EventRange {

 // Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
 #[get("/organizations/<org_id>/events?<data..>")]
-async fn get_org_events(org_id: String, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
+async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
     // Return an empty vec when org events are disabled.
     // This prevents client errors
     let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
@@ -45,7 +45,7 @@ async fn get_org_events(org_id: String, data: EventRange, _headers: AdminHeaders
             parse_date(&data.end)
         };

-        Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
+        Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn)
             .await
             .iter()
             .map(|e| e.to_json())
@@ -60,14 +60,14 @@ async fn get_org_events(org_id: String, data: EventRange, _headers: AdminHeaders
 }

 #[get("/ciphers/<cipher_id>/events?<data..>")]
-async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Return an empty vec when org events are disabled.
     // This prevents client errors
     let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
         Vec::with_capacity(0)
     } else {
         let mut events_json = Vec::with_capacity(0);
-        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
+        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await {
             let start_date = parse_date(&data.start);
             let end_date = if let Some(before_date) = &data.continuation_token {
                 parse_date(before_date)
@@ -75,7 +75,7 @@ async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers
                 parse_date(&data.end)
             };

-            events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
+            events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn)
                 .await
                 .iter()
                 .map(|e| e.to_json())
@@ -93,8 +93,8 @@ async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers

 #[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
 async fn get_user_events(
-    org_id: String,
-    user_org_id: String,
+    org_id: &str,
+    user_org_id: &str,
     data: EventRange,
     _headers: AdminHeaders,
     mut conn: DbConn,
@@ -111,7 +111,7 @@ async fn get_user_events(
             parse_date(&data.end)
         };

-        Event::find_by_org_and_user_org(&org_id, &user_org_id, &start_date, &end_date, &mut conn)
+        Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn)
             .await
             .iter()
             .map(|e| e.to_json())
@@ -161,12 +161,7 @@ struct EventCollection {
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
 #[post("/collect", format = "application/json", data = "<data>")]
-async fn post_events_collect(
-    data: JsonUpcaseVec<EventCollection>,
-    headers: Headers,
-    mut conn: DbConn,
-    ip: ClientIp,
-) -> EmptyResult {
+async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.org_events_enabled() {
         return Ok(());
     }
@@ -180,7 +175,7 @@ async fn post_events_collect(
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),
-                    &ip.ip,
+                    &headers.ip.ip,
                     &mut conn,
                 )
                 .await;
@@ -190,11 +185,11 @@ async fn post_events_collect(
                 _log_event(
                     event.Type,
                     org_uuid,
-                    String::from(org_uuid),
+                    org_uuid,
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),
-                    &ip.ip,
+                    &headers.ip.ip,
                     &mut conn,
                 )
                 .await;
@@ -207,11 +202,11 @@ async fn post_events_collect(
                 _log_event(
                     event.Type,
                     cipher_uuid,
-                    org_uuid,
+                    &org_uuid,
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),
-                    &ip.ip,
+                    &headers.ip.ip,
                     &mut conn,
                 )
                 .await;
@@ -267,7 +262,7 @@ async fn _log_user_event(
 pub async fn log_event(
     event_type: i32,
     source_uuid: &str,
-    org_uuid: String,
+    org_uuid: &str,
     act_user_uuid: String,
     device_type: i32,
     ip: &IpAddr,
@@ -283,7 +278,7 @@ pub async fn log_event(
 async fn _log_event(
     event_type: i32,
     source_uuid: &str,
-    org_uuid: String,
+    org_uuid: &str,
     act_user_uuid: &str,
     device_type: i32,
     event_date: Option<NaiveDateTime>,
@@ -319,7 +314,7 @@ async fn _log_event(
         _ => {}
     }

-    event.org_uuid = Some(org_uuid);
+    event.org_uuid = Some(String::from(org_uuid));
     event.act_user_uuid = Some(String::from(act_user_uuid));
     event.device_type = Some(device_type);
     event.ip_address = Some(ip.to_string());
src/api/core/folders.rs
@@ -24,8 +24,8 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
 }

 #[get("/folders/<uuid>")]
-async fn get_folder(uuid: String, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
+async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -50,14 +50,14 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
     let mut folder = Folder::new(headers.user.uuid, data.Name);

     folder.save(&mut conn).await?;
-    nt.send_folder_update(UpdateType::FolderCreate, &folder).await;
+    nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>", data = "<data>")]
 async fn post_folder(
-    uuid: String,
+    uuid: &str,
     data: JsonUpcase<FolderData>,
     headers: Headers,
     conn: DbConn,
@@ -68,7 +68,7 @@ async fn post_folder(

 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
-    uuid: String,
+    uuid: &str,
     data: JsonUpcase<FolderData>,
     headers: Headers,
     mut conn: DbConn,
@@ -76,7 +76,7 @@ async fn put_folder(
 ) -> JsonResult {
     let data: FolderData = data.into_inner().data;

-    let mut folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
+    let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -88,19 +88,19 @@ async fn put_folder(
     folder.name = data.Name;

     folder.save(&mut conn).await?;
-    nt.send_folder_update(UpdateType::FolderUpdate, &folder).await;
+    nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>/delete")]
-async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
     delete_folder(uuid, headers, conn, nt).await
 }

 #[delete("/folders/<uuid>")]
-async fn delete_folder(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
+async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -112,6 +112,6 @@ async fn delete_folder(uuid: String, headers: Headers, mut conn: DbConn, nt: Not
     // Delete the actual folder entry
     folder.delete(&mut conn).await?;

-    nt.send_folder_update(UpdateType::FolderDelete, &folder).await;
+    nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device.uuid, &mut conn).await;
     Ok(())
 }
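The renamed SyncFolder* update types now take the acting device's UUID (plus a DB handle), which lets the notification layer avoid echoing a change back to the device that made it. As a rough illustration only — Device::find_by_user and send_to_device here are hypothetical stand-ins, not the actual vaultwarden internals:

    // Hypothetical sketch of a device-aware broadcast; names are illustrative.
    async fn notify_folder_update(ut: UpdateType, folder: &Folder, acting_device_uuid: &str, conn: &mut DbConn) {
        for device in Device::find_by_user(&folder.user_uuid, conn).await {
            // Skip the originating device: it already holds the new state.
            if device.uuid != acting_device_uuid {
                send_to_device(&device, ut, folder).await; // hypothetical helper
            }
        }
    }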
src/api/core/mod.rs
@@ -4,18 +4,17 @@ mod emergency_access;
 mod events;
 mod folders;
 mod organizations;
+mod public;
 mod sends;
 pub mod two_factor;

-pub use ciphers::purge_trashed_ciphers;
-pub use ciphers::{CipherSyncData, CipherSyncType};
+pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
 pub use two_factor::send_incomplete_2fa_notifications;

 pub fn routes() -> Vec<Route> {
-    let mut device_token_routes = routes![clear_device_token, put_device_token];
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
     let mut hibp_routes = routes![hibp_breach];
     let mut meta_routes = routes![alive, now, version, config];
@@ -29,7 +28,7 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
     routes.append(&mut sends::routes());
-    routes.append(&mut device_token_routes);
+    routes.append(&mut public::routes());
     routes.append(&mut eq_domains_routes);
     routes.append(&mut hibp_routes);
     routes.append(&mut meta_routes);
@@ -47,50 +46,17 @@ pub fn events_routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
-use rocket::serde::json::Json;
-use rocket::Catcher;
-use rocket::Route;
+use rocket::{serde::json::Json, Catcher, Route};
 use serde_json::Value;

 use crate::{
-    api::{JsonResult, JsonUpcase},
+    api::{JsonResult, JsonUpcase, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
     util::get_reqwest_client,
 };

-#[put("/devices/identifier/<uuid>/clear-token")]
-fn clear_device_token(uuid: String) -> &'static str {
-    // This endpoint doesn't have auth header
-
-    let _ = uuid;
-    // uuid is not related to deviceId
-
-    // This only clears push token
-    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
-    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
-    ""
-}
-
-#[put("/devices/identifier/<uuid>/token", data = "<data>")]
-fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
-    let _data: Value = data.into_inner().data;
-    // Data has a single string value "PushToken"
-    let _ = uuid;
-    // uuid is not related to deviceId
-
-    // TODO: This should save the push token, but we don't have push functionality
-
-    Json(json!({
-        "Id": headers.device.uuid,
-        "Name": headers.device.name,
-        "Type": headers.device.atype,
-        "Identifier": headers.device.uuid,
-        "CreationDate": crate::util::format_date(&headers.device.created_at),
-    }))
-}
-
 #[derive(Serialize, Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct GlobalDomain {
@@ -138,7 +104,12 @@ struct EquivDomainData {
 }

 #[post("/settings/domains", data = "<data>")]
-async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_eq_domains(
+    data: JsonUpcase<EquivDomainData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;

     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -152,19 +123,25 @@ async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, mu

     user.save(&mut conn).await?;

+    nt.send_user_update(UpdateType::SyncSettings, &user).await;
+
     Ok(Json(json!({})))
 }

 #[put("/settings/domains", data = "<data>")]
-async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_eq_domains(data, headers, conn).await
+async fn put_eq_domains(
+    data: JsonUpcase<EquivDomainData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    post_eq_domains(data, headers, conn, nt).await
 }

 #[get("/hibp/breach?<username>")]
-async fn hibp_breach(username: String) -> JsonResult {
+async fn hibp_breach(username: &str) -> JsonResult {
     let url = format!(
-        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
-        username
+        "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
     );

     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
@@ -186,7 +163,7 @@ async fn hibp_breach(username: String) -> JsonResult {
             "Domain": "haveibeenpwned.com",
             "BreachDate": "2019-08-18T00:00:00Z",
             "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
+            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
             "LogoPath": "vw_static/hibp.png",
             "PwnCount": 0,
             "DataClasses": [
@@ -229,6 +206,7 @@ fn config() -> Json<Value> {
             "notifications": format!("{domain}/notifications"),
             "sso": "",
         },
+        "object": "config",
     }))
 }
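Several hunks above (password_hint, hibp_breach, the HIBP Description string) migrate to inline format arguments, stabilized in Rust 1.58; the positional and captured-identifier forms produce identical output:

    let username = "demo";
    assert_eq!(
        format!("https://haveibeenpwned.com/api/v3/breachedaccount/{}", username),
        format!("https://haveibeenpwned.com/api/v3/breachedaccount/{username}"),
    );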
[File diff suppressed because it is too large]

src/api/core/public.rs (new file, 238 lines)
@@ -0,0 +1,238 @@
+use chrono::Utc;
+use rocket::{
+    request::{self, FromRequest, Outcome},
+    Request, Route,
+};
+
+use std::collections::HashSet;
+
+use crate::{
+    api::{EmptyResult, JsonUpcase},
+    auth,
+    db::{models::*, DbConn},
+    mail, CONFIG,
+};
+
+pub fn routes() -> Vec<Route> {
+    routes![ldap_import]
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct OrgImportGroupData {
+    Name: String,
+    ExternalId: String,
+    MemberExternalIds: Vec<String>,
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct OrgImportUserData {
+    Email: String,
+    ExternalId: String,
+    Deleted: bool,
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct OrgImportData {
+    Groups: Vec<OrgImportGroupData>,
+    Members: Vec<OrgImportUserData>,
+    OverwriteExisting: bool,
+    // LargeImport: bool, // For now this will not be used; upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
+}
+
+#[post("/public/organization/import", data = "<data>")]
+async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
+    // Most of the logic for this function can be found here
+    // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
+
+    let org_id = token.0;
+    let data = data.into_inner().data;
+
+    for user_data in &data.Members {
+        if user_data.Deleted {
+            // If the user is marked for deletion and exists, revoke it
+            if let Some(mut user_org) =
+                UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+            {
+                user_org.revoke();
+                user_org.save(&mut conn).await?;
+            }
+
+        // If user is part of the organization, restore it
+        } else if let Some(mut user_org) =
+            UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+        {
+            if user_org.status < UserOrgStatus::Revoked as i32 {
+                user_org.restore();
+                user_org.save(&mut conn).await?;
+            }
+        } else {
+            // If user is not part of the organization
+            let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
+                Some(user) => user, // exists in vaultwarden
+                None => {
+                    // doesn't exist in vaultwarden
+                    let mut new_user = User::new(user_data.Email.clone());
+                    new_user.set_external_id(Some(user_data.ExternalId.clone()));
+                    new_user.save(&mut conn).await?;
+
+                    if !CONFIG.mail_enabled() {
+                        let invitation = Invitation::new(&new_user.email);
+                        invitation.save(&mut conn).await?;
+                    }
+                    new_user
+                }
+            };
+            let user_org_status = if CONFIG.mail_enabled() {
+                UserOrgStatus::Invited as i32
+            } else {
+                UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
+            };
+
+            let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
+            new_org_user.access_all = false;
+            new_org_user.atype = UserOrgType::User as i32;
+            new_org_user.status = user_org_status;
+
+            new_org_user.save(&mut conn).await?;
+
+            if CONFIG.mail_enabled() {
+                let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
+                    Some(org) => (org.name, org.billing_email),
+                    None => err!("Error looking up organization"),
+                };
+
+                mail::send_invite(
+                    &user_data.Email,
+                    &user.uuid,
+                    Some(org_id.clone()),
+                    Some(new_org_user.uuid),
+                    &org_name,
+                    Some(org_email),
+                )
+                .await?;
+            }
+        }
+    }
+
+    if CONFIG.org_groups_enabled() {
+        for group_data in &data.Groups {
+            let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
+                Some(group) => group.uuid,
+                None => {
+                    let mut group =
+                        Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
+                    group.save(&mut conn).await?;
+                    group.uuid
+                }
+            };
+
+            GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
+
+            for ext_id in &group_data.MemberExternalIds {
+                if let Some(user) = User::find_by_external_id(ext_id, &mut conn).await {
+                    if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await
+                    {
+                        let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
+                        group_user.save(&mut conn).await?;
+                    }
+                }
+            }
+        }
+    } else {
+        warn!("Group support is disabled, groups will not be imported!");
+    }
+
+    // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
+    if data.OverwriteExisting {
+        // Generate a HashSet to quickly verify if a member is listed or not.
+        let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
+        for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
+            if let Some(user_external_id) =
+                User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.external_id)
+            {
+                if user_external_id.is_some() && !sync_members.contains(&user_external_id.unwrap()) {
+                    if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
+                        // Removing owner, check that there is at least one other confirmed owner
+                        if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
+                            .await
+                            <= 1
+                        {
+                            warn!("Can't delete the last owner");
+                            continue;
+                        }
+                    }
+                    user_org.delete(&mut conn).await?;
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
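Matching the OrgImportData shape above, a minimal request body for /public/organization/import would look like this (field names come from the structs; the values are purely illustrative):

    let body = serde_json::json!({
        "Groups": [
            { "Name": "Engineering", "ExternalId": "grp-eng", "MemberExternalIds": ["u-1001"] }
        ],
        "Members": [
            { "Email": "alice@example.com", "ExternalId": "u-1001", "Deleted": false }
        ],
        "OverwriteExisting": false
    });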

+pub struct PublicToken(String);
+
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for PublicToken {
+    type Error = &'static str;
+
+    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
+        let headers = request.headers();
+        // Get access_token
+        let access_token: &str = match headers.get_one("Authorization") {
+            Some(a) => match a.rsplit("Bearer ").next() {
+                Some(split) => split,
+                None => err_handler!("No access token provided"),
+            },
+            None => err_handler!("No access token provided"),
+        };
+        // Check JWT token is valid and get device and user from it
+        let claims = match auth::decode_api_org(access_token) {
+            Ok(claims) => claims,
+            Err(_) => err_handler!("Invalid claim"),
+        };
+        // Check if time is between claims.nbf and claims.exp
+        let time_now = Utc::now().naive_utc().timestamp();
+        if time_now < claims.nbf {
+            err_handler!("Token issued in the future");
+        }
+        if time_now > claims.exp {
+            err_handler!("Token expired");
+        }
+        // Check if claims.iss is host|claims.scope[0]
+        let host = match auth::Host::from_request(request).await {
+            Outcome::Success(host) => host,
+            _ => err_handler!("Error getting Host"),
+        };
+        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
+        if complete_host != claims.iss {
+            err_handler!("Token not issued by this server");
+        }
+
+        // Check if claims.sub is org_api_key.uuid
+        // Check if claims.client_sub is org_api_key.org_uuid
+        let conn = match DbConn::from_request(request).await {
+            Outcome::Success(conn) => conn,
+            _ => err_handler!("Error getting DB"),
+        };
+        let org_uuid = match claims.client_id.strip_prefix("organization.") {
+            Some(uuid) => uuid,
+            None => err_handler!("Malformed client_id"),
+        };
+        let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await {
+            Some(org_api_key) => org_api_key,
+            None => err_handler!("Invalid client_id"),
+        };
+        if org_api_key.org_uuid != claims.client_sub {
+            err_handler!("Token not issued for this org");
+        }
+        if org_api_key.uuid != claims.sub {
+            err_handler!("Token not issued for this client");
+        }
+
+        Outcome::Success(PublicToken(claims.client_sub))
+    }
+}
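The token extraction above relies on str::rsplit, which yields the text after the last "Bearer " separator first, so the raw JWT comes out of .next(). A quick check of that behavior:

    // rsplit iterates the split pieces from right to left.
    assert_eq!("Bearer abc.def.ghi".rsplit("Bearer ").next(), Some("abc.def.ghi"));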
src/api/core/sends.rs
@@ -154,8 +154,8 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
 }

 #[get("/sends/<uuid>")]
-async fn get_send(uuid: String, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let send = match Send::find_by_uuid(&uuid, &mut conn).await {
+async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let send = match Send::find_by_uuid(uuid, &mut conn).await {
         Some(send) => send,
         None => err!("Send not found"),
     };
@@ -180,7 +180,14 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon

     let mut send = create_send(data, headers.user.uuid)?;
     send.save(&mut conn).await?;
-    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;
+    nt.send_send_update(
+        UpdateType::SyncSendCreate,
+        &send,
+        &send.update_users_revision(&mut conn).await,
+        &headers.device.uuid,
+        &mut conn,
+    )
+    .await;

     Ok(Json(send.to_json()))
 }
@@ -228,19 +235,6 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
         err!("Send content is not a file");
     }

-    // There is a bug regarding uploading attachments/sends using the Mobile clients
-    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
-    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
-    // On the vaultwarden side this is temporarily fixed by using a custom multer library
-    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
-    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
-    if let TempFile::Buffered {
-        content: _,
-    } = &data
-    {
-        err!("Error reading send file data. Please try an other client.");
-    }
-
     let size = data.len();
     if size > size_limit {
         err!("Attachment storage limit exceeded with this file");
@@ -265,7 +259,14 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:

     // Save the changes in the database
     send.save(&mut conn).await?;
-    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;
+    nt.send_send_update(
+        UpdateType::SyncSendCreate,
+        &send,
+        &send.update_users_revision(&mut conn).await,
+        &headers.device.uuid,
+        &mut conn,
+    )
+    .await;

     Ok(Json(send.to_json()))
 }
@@ -328,8 +329,8 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
 // https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
 #[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
 async fn post_send_file_v2_data(
-    send_uuid: String,
-    file_id: String,
+    send_uuid: &str,
+    file_id: &str,
     data: Form<UploadDataV2<'_>>,
     headers: Headers,
     mut conn: DbConn,
@@ -339,32 +340,29 @@ async fn post_send_file_v2_data(

     let mut data = data.into_inner();

-    // There is a bug regarding uploading attachments/sends using the Mobile clients
-    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
-    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
-    // On the vaultwarden side this is temporarily fixed by using a custom multer library
-    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
-    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
-    if let TempFile::Buffered {
-        content: _,
-    } = &data.data
-    {
-        err!("Error reading attachment data. Please try an other client.");
-    }
+    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { err!("Send not found. Unable to save the file.") };
+
+    let Some(send_user_id) = &send.user_uuid else { err!("Sends are only supported for users at the moment") };
+    if send_user_id != &headers.user.uuid {
+        err!("Send doesn't belong to user");
+    }

-    if let Some(send) = Send::find_by_uuid(&send_uuid, &mut conn).await {
-        let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send_uuid);
-        let file_path = folder_path.join(&file_id);
+    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
+    let file_path = folder_path.join(file_id);
     tokio::fs::create_dir_all(&folder_path).await?;

     if let Err(_err) = data.data.persist_to(&file_path).await {
         data.data.move_copy_to(file_path).await?
     }

-        nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;
-    } else {
-        err!("Send not found. Unable to save the file.");
-    }
+    nt.send_send_update(
+        UpdateType::SyncSendCreate,
+        &send,
+        &send.update_users_revision(&mut conn).await,
+        &headers.device.uuid,
+        &mut conn,
+    )
+    .await;

     Ok(())
 }
|
||||
@@ -377,12 +375,13 @@ pub struct SendAccessData {
|
||||
|
||||
#[post("/sends/access/<access_id>", data = "<data>")]
|
||||
async fn post_access(
|
||||
access_id: String,
|
||||
access_id: &str,
|
||||
data: JsonUpcase<SendAccessData>,
|
||||
mut conn: DbConn,
|
||||
ip: ClientIp,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let mut send = match Send::find_by_access_id(&access_id, &mut conn).await {
|
||||
let mut send = match Send::find_by_access_id(access_id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
|
||||
};
|
||||
@@ -422,18 +421,28 @@ async fn post_access(
|
||||
|
||||
send.save(&mut conn).await?;
|
||||
|
||||
nt.send_send_update(
|
||||
UpdateType::SyncSendUpdate,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&String::from("00000000-0000-0000-0000-000000000000"),
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(Json(send.to_json_access(&mut conn).await))
|
||||
}
|
||||
|
||||
#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
|
||||
async fn post_access_file(
|
||||
send_id: String,
|
||||
file_id: String,
|
||||
send_id: &str,
|
||||
file_id: &str,
|
||||
data: JsonUpcase<SendAccessData>,
|
||||
host: Host,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let mut send = match Send::find_by_uuid(&send_id, &mut conn).await {
|
||||
let mut send = match Send::find_by_uuid(send_id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
|
||||
};
|
||||
@@ -470,7 +479,16 @@ async fn post_access_file(
|
||||
|
||||
send.save(&mut conn).await?;
|
||||
|
||||
let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
|
||||
nt.send_send_update(
|
||||
UpdateType::SyncSendUpdate,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&String::from("00000000-0000-0000-0000-000000000000"),
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
let token_claims = crate::auth::generate_send_claims(send_id, file_id);
|
||||
let token = crate::auth::encode_jwt(&token_claims);
|
||||
Ok(Json(json!({
|
||||
"Object": "send-fileDownload",
|
||||
@@ -480,9 +498,9 @@ async fn post_access_file(
|
||||
}
|
||||
|
||||
#[get("/sends/<send_id>/<file_id>?<t>")]
|
||||
async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
|
||||
if let Ok(claims) = crate::auth::decode_send(&t) {
|
||||
if claims.sub == format!("{}/{}", send_id, file_id) {
|
||||
async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option<NamedFile> {
|
||||
if let Ok(claims) = crate::auth::decode_send(t) {
|
||||
if claims.sub == format!("{send_id}/{file_id}") {
|
||||
return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
|
||||
}
|
||||
}
|
||||
@@ -491,7 +509,7 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> O
|
||||
|
||||
#[put("/sends/<id>", data = "<data>")]
|
||||
async fn put_send(
|
||||
id: String,
|
||||
id: &str,
|
||||
data: JsonUpcase<SendData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
@@ -502,7 +520,7 @@ async fn put_send(
|
||||
let data: SendData = data.into_inner().data;
|
||||
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
|
||||
|
||||
let mut send = match Send::find_by_uuid(&id, &mut conn).await {
|
||||
let mut send = match Send::find_by_uuid(id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
};
|
||||
@@ -550,14 +568,21 @@ async fn put_send(
|
||||
}
|
||||
|
||||
send.save(&mut conn).await?;
|
||||
nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;
|
||||
nt.send_send_update(
|
||||
UpdateType::SyncSendUpdate,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&headers.device.uuid,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(Json(send.to_json()))
|
||||
}
|
||||
|
||||
#[delete("/sends/<id>")]
|
||||
async fn delete_send(id: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let send = match Send::find_by_uuid(&id, &mut conn).await {
|
||||
async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let send = match Send::find_by_uuid(id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
};
|
||||
@@ -567,16 +592,23 @@ async fn delete_send(id: String, headers: Headers, mut conn: DbConn, nt: Notify<
|
||||
}
|
||||
|
||||
send.delete(&mut conn).await?;
|
||||
nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&mut conn).await).await;
|
||||
nt.send_send_update(
|
||||
UpdateType::SyncSendDelete,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&headers.device.uuid,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[put("/sends/<id>/remove-password")]
|
||||
async fn put_remove_password(id: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
async fn put_remove_password(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &mut conn).await?;
|
||||
|
||||
let mut send = match Send::find_by_uuid(&id, &mut conn).await {
|
||||
let mut send = match Send::find_by_uuid(id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
};
|
||||
@@ -587,7 +619,14 @@ async fn put_remove_password(id: String, headers: Headers, mut conn: DbConn, nt:
|
||||
|
||||
send.set_password(None);
|
||||
send.save(&mut conn).await?;
|
||||
nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;
|
||||
nt.send_send_update(
|
||||
UpdateType::SyncSendUpdate,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&headers.device.uuid,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(Json(send.to_json()))
|
||||
}
|
||||
|
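Note: the post_send_file_v2_data rewrite above flattens the nested `if let Some(send) ... else { err!(...) }` into early-return `let ... else` guards, which is why the `} else { err!("Send not found...") }` arm disappears. A minimal, self-contained sketch of that pattern (hypothetical types, not the actual vaultwarden definitions):

struct Send {
    user_uuid: Option<String>,
}

fn save_file(send: Option<Send>, current_user: &str) -> Result<(), String> {
    // Bail out early when the lookup failed, keeping the happy path unindented.
    let Some(send) = send else {
        return Err("Send not found. Unable to save the file.".into());
    };

    // Same guard style for the ownership check.
    let Some(owner) = &send.user_uuid else {
        return Err("Sends are only supported for users at the moment".into());
    };
    if owner.as_str() != current_user {
        return Err("Send doesn't belong to user".into());
    }

    Ok(())
}

fn main() {
    let send = Some(Send { user_uuid: Some("alice".into()) });
    assert!(save_file(send, "alice").is_ok());
}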
@@ -57,7 +57,6 @@ struct EnableAuthenticatorData {
 async fn activate_authenticator(
     data: JsonUpcase<EnableAuthenticatorData>,
     headers: Headers,
-    ip: ClientIp,
     mut conn: DbConn,
 ) -> JsonResult {
     let data: EnableAuthenticatorData = data.into_inner().data;
@@ -82,11 +81,11 @@ async fn activate_authenticator(
     }
 
     // Validate the token provided with the key, and save new twofactor
-    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &mut conn).await?;
+    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &mut conn).await?;
 
     _generate_recover_code(&mut user, &mut conn).await;
 
-    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     Ok(Json(json!({
         "Enabled": true,
@@ -99,10 +98,9 @@ async fn activate_authenticator(
 async fn activate_authenticator_put(
     data: JsonUpcase<EnableAuthenticatorData>,
     headers: Headers,
-    ip: ClientIp,
     conn: DbConn,
 ) -> JsonResult {
-    activate_authenticator(data, headers, ip, conn).await
+    activate_authenticator(data, headers, conn).await
 }
 
 pub async fn validate_totp_code_str(
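Note: in this and the following two-factor hunks the separate `ip: ClientIp` request guard disappears and call sites read `headers.ip` instead. The compare view does not show the guard itself, but the change only works if the `Headers` guard gained an `ip` field; a hedged sketch of that shape (field names assumed, not the actual auth.rs definitions):

use std::net::{IpAddr, Ipv4Addr};

// Assumed shapes: in vaultwarden these are Rocket request guards built in auth.rs.
struct ClientIp {
    ip: IpAddr,
}

struct Headers {
    ip: ClientIp, // the consolidation: carry the client IP inside Headers
    // ... user, device, host, etc.
}

fn log_user_event(event: i32, ip: &IpAddr) {
    println!("event {event} from {ip}");
}

fn main() {
    let headers = Headers {
        ip: ClientIp {
            ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
        },
    };
    // Call sites change from a separate `ip` argument to `&headers.ip.ip`.
    log_user_event(42, &headers.ip.ip); // 42 is an arbitrary event id for the demo
}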
@@ -8,7 +8,7 @@ use crate::{
         core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
         PasswordData,
     },
-    auth::{ClientIp, Headers},
+    auth::Headers,
     crypto,
     db::{
         models::{EventType, TwoFactor, TwoFactorType, User},
@@ -155,7 +155,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
 }
 
 #[post("/two-factor/duo", data = "<data>")]
-async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
+async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: EnableDuoData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -178,7 +178,7 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
 
     _generate_recover_code(&mut user, &mut conn).await;
 
-    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     Ok(Json(json!({
         "Enabled": true,
@@ -190,8 +190,8 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
 }
 
 #[put("/two-factor/duo", data = "<data>")]
-async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn, ip: ClientIp) -> JsonResult {
-    activate_duo(data, headers, conn, ip).await
+async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+    activate_duo(data, headers, conn).await
 }
 
 async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
@@ -270,11 +270,11 @@ pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult
     let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
     let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
 
-    Ok((format!("{}:{}", duo_sign, app_sign), host))
+    Ok((format!("{duo_sign}:{app_sign}"), host))
 }
 
 fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
-    let val = format!("{}|{}|{}", email, ikey, expire);
+    let val = format!("{email}|{ikey}|{expire}");
     let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
 
     format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
@@ -327,7 +327,7 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
     let u_b64 = split[1];
     let u_sig = split[2];
 
-    let sig = crypto::hmac_sign(key, &format!("{}|{}", u_prefix, u_b64));
+    let sig = crypto::hmac_sign(key, &format!("{u_prefix}|{u_b64}"));
 
     if !crypto::ct_eq(crypto::hmac_sign(key, &sig), crypto::hmac_sign(key, u_sig)) {
         err!("Duo signatures don't match")
@@ -7,7 +7,7 @@ use crate::{
         core::{log_user_event, two_factor::_generate_recover_code},
         EmptyResult, JsonResult, JsonUpcase, PasswordData,
     },
-    auth::{ClientIp, Headers},
+    auth::Headers,
     crypto,
     db::{
         models::{EventType, TwoFactor, TwoFactorType},
@@ -90,7 +90,7 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
             let twofactor_data = EmailTokenData::from_json(&x.data)?;
             (true, json!(twofactor_data.email))
         }
-        _ => (false, json!(null)),
+        _ => (false, serde_json::value::Value::Null),
     };
 
     Ok(Json(json!({
@@ -150,7 +150,7 @@ struct EmailData {
 
 /// Verify email belongs to user and can be used for 2FA email codes.
 #[put("/two-factor/email", data = "<data>")]
-async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
+async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: EmailData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -180,7 +180,7 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn,
 
     _generate_recover_code(&mut user, &mut conn).await;
 
-    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     Ok(Json(json!({
         "Email": email_data.email,
@@ -304,7 +304,7 @@ pub fn obscure_email(email: &str) -> String {
         _ => {
             let stars = "*".repeat(name_size - 2);
             name.truncate(2);
-            format!("{}{}", name, stars)
+            format!("{name}{stars}")
         }
     };
 
@@ -6,7 +6,7 @@ use serde_json::Value;
 
 use crate::{
     api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
-    auth::{ClientHeaders, ClientIp, Headers},
+    auth::{ClientHeaders, Headers},
     crypto,
     db::{models::*, DbConn, DbPool},
     mail, CONFIG,
@@ -73,12 +73,7 @@ struct RecoverTwoFactor {
 }
 
 #[post("/two-factor/recover", data = "<data>")]
-async fn recover(
-    data: JsonUpcase<RecoverTwoFactor>,
-    client_headers: ClientHeaders,
-    mut conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
+async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
     let data: RecoverTwoFactor = data.into_inner().data;
 
     use crate::db::models::User;
@@ -102,12 +97,19 @@
     // Remove all twofactors from the user
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
 
-    log_user_event(EventType::UserRecovered2fa as i32, &user.uuid, client_headers.device_type, &ip.ip, &mut conn).await;
+    log_user_event(
+        EventType::UserRecovered2fa as i32,
+        &user.uuid,
+        client_headers.device_type,
+        &client_headers.ip.ip,
+        &mut conn,
+    )
+    .await;
 
     // Remove the recovery code, not needed without twofactors
     user.totp_recover = None;
     user.save(&mut conn).await?;
-    Ok(Json(json!({})))
+    Ok(Json(Value::Object(serde_json::Map::new())))
 }
 
 async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
@@ -126,12 +128,7 @@ struct DisableTwoFactorData {
 }
 
 #[post("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor(
-    data: JsonUpcase<DisableTwoFactorData>,
-    headers: Headers,
-    mut conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
+async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: DisableTwoFactorData = data.into_inner().data;
     let password_hash = data.MasterPasswordHash;
     let user = headers.user;
@@ -144,7 +141,8 @@ async fn disable_twofactor(
 
     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         twofactor.delete(&mut conn).await?;
-        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn)
+            .await;
     }
 
     let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
@@ -173,13 +171,8 @@
 }
 
 #[put("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor_put(
-    data: JsonUpcase<DisableTwoFactorData>,
-    headers: Headers,
-    conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
-    disable_twofactor(data, headers, conn, ip).await
+async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
+    disable_twofactor(data, headers, conn).await
 }
 
 pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
@@ -9,7 +9,7 @@ use crate::{
         core::{log_user_event, two_factor::_generate_recover_code},
         EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
     },
-    auth::{ClientIp, Headers},
+    auth::Headers,
     db::{
         models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
@@ -242,12 +242,7 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
 }
 
 #[post("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn(
-    data: JsonUpcase<EnableWebauthnData>,
-    headers: Headers,
-    mut conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
+async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: EnableWebauthnData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -286,7 +281,7 @@ async fn activate_webauthn(
     .await?;
     _generate_recover_code(&mut user, &mut conn).await;
 
-    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     Ok(Json(json!({
@@ -297,13 +292,8 @@
 }
 
 #[put("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn_put(
-    data: JsonUpcase<EnableWebauthnData>,
-    headers: Headers,
-    conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
-    activate_webauthn(data, headers, conn, ip).await
+async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
+    activate_webauthn(data, headers, conn).await
 }
 
 #[derive(Deserialize, Debug)]
@@ -8,7 +8,7 @@ use crate::{
         core::{log_user_event, two_factor::_generate_recover_code},
         EmptyResult, JsonResult, JsonUpcase, PasswordData,
     },
-    auth::{ClientIp, Headers},
+    auth::Headers,
     db::{
         models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
@@ -47,7 +47,7 @@ fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
 }
 
 fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
-    let mut result = json!({});
+    let mut result = Value::Object(serde_json::Map::new());
 
     for (i, key) in yubikeys.into_iter().enumerate() {
         result[format!("Key{}", i + 1)] = Value::String(key);
@@ -118,12 +118,7 @@ async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut
 }
 
 #[post("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey(
-    data: JsonUpcase<EnableYubikeyData>,
-    headers: Headers,
-    mut conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
+async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: EnableYubikeyData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -169,7 +164,7 @@
 
     _generate_recover_code(&mut user, &mut conn).await;
 
-    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
     let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
 
@@ -181,13 +176,8 @@
 }
 
 #[put("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey_put(
-    data: JsonUpcase<EnableYubikeyData>,
-    headers: Headers,
-    conn: DbConn,
-    ip: ClientIp,
-) -> JsonResult {
-    activate_yubikey(data, headers, conn, ip).await
+async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
+    activate_yubikey(data, headers, conn).await
 }
 
 pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
@@ -79,7 +79,7 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
         return None;
     }
 
-    if is_domain_blacklisted(domain).await {
+    if check_domain_blacklist_reason(domain).await.is_some() {
         return None;
     }
 
@@ -97,15 +97,15 @@
 }
 
 #[get("/<domain>/icon.png")]
-async fn icon_external(domain: String) -> Option<Redirect> {
-    icon_redirect(&domain, &CONFIG._icon_service_url()).await
+async fn icon_external(domain: &str) -> Option<Redirect> {
+    icon_redirect(domain, &CONFIG._icon_service_url()).await
 }
 
 #[get("/<domain>/icon.png")]
-async fn icon_internal(domain: String) -> Cached<(ContentType, Vec<u8>)> {
+async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
 
-    if !is_valid_domain(&domain) {
+    if !is_valid_domain(domain) {
         warn!("Invalid domain: {}", domain);
         return Cached::ttl(
             (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
@@ -114,7 +114,7 @@ async fn icon_internal(domain: String) -> Cached<(ContentType, Vec<u8>)> {
         );
     }
 
-    match get_icon(&domain).await {
+    match get_icon(domain).await {
         Some((icon, icon_type)) => {
             Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
         }
@@ -130,7 +130,7 @@ fn is_valid_domain(domain: &str) -> bool {
     const ALLOWED_CHARS: &str = "_-.";
 
     // If parsing the domain fails using Url, it will not work with reqwest.
-    if let Err(parse_error) = url::Url::parse(format!("https://{}", domain).as_str()) {
+    if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
         debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
         return false;
     } else if domain.is_empty()
@@ -258,9 +258,15 @@ mod tests {
     }
 }
 
+#[derive(Debug, Clone)]
+enum DomainBlacklistReason {
+    Regex,
+    IP,
+}
+
 use cached::proc_macro::cached;
 #[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
-async fn is_domain_blacklisted(domain: &str) -> bool {
+async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
     // First check the blacklist regex if there is a match.
     // This prevents the blocked domain(s) from being leaked via a DNS lookup.
     if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
@@ -284,7 +290,7 @@
 
         if is_match {
             debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-            return true;
+            return Some(DomainBlacklistReason::Regex);
         }
     }
 
@@ -293,13 +299,13 @@
             for addr in s {
                 if !is_global(addr.ip()) {
                     debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
-                    return true;
+                    return Some(DomainBlacklistReason::IP);
                 }
             }
         }
     }
 
-    false
+    None
 }
 
 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
@@ -564,8 +570,10 @@ async fn get_page(url: &str) -> Result<Response, Error> {
 }
 
 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-        warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url);
+    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
+        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
+        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
+        None => (),
     }
 
     let mut client = CLIENT.get(url);
@@ -575,7 +583,7 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Err
 
     match client.send().await {
         Ok(c) => c.error_for_status().map_err(Into::into),
-        Err(e) => err_silent!(format!("{}", e)),
+        Err(e) => err_silent!(format!("{e}")),
     }
 }
 
@@ -659,8 +667,10 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }
 
 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-    if is_domain_blacklisted(domain).await {
-        err_silent!("Domain is blacklisted", domain)
+    match check_domain_blacklist_reason(domain).await {
+        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
+        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
+        None => (),
     }
 
     let icon_result = get_icon_url(domain).await?;
@@ -672,7 +682,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
 
     for icon in icon_result.iconlist.iter().take(5) {
         if icon.href.starts_with("data:image") {
-            let datauri = DataUrl::process(&icon.href).unwrap();
+            let Ok(datauri) = DataUrl::process(&icon.href) else {continue};
             // Check if we are able to decode the data uri
             let mut body = BytesMut::new();
             match datauri.decode::<_, ()>(|bytes| {
@@ -797,7 +807,7 @@ impl reqwest::cookie::CookieStore for Jar {
         let cookie_store = self.0.read().unwrap();
         let s = cookie_store
            .get_request_values(url)
-            .map(|(name, value)| format!("{}={}", name, value))
+            .map(|(name, value)| format!("{name}={value}"))
            .collect::<Vec<_>>()
            .join("; ");
 
@@ -881,6 +891,7 @@ impl Emitter for FaviconEmitter {
             FaviconToken::EndTag(ref mut tag) => {
                 // Always clean seen attributes
                 self.seen_attributes.clear();
+                self.set_last_start_tag(None);
 
                 // Only trigger an emit for the </head> tag.
                 // This is matched, and will break the for-loop.
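Note: the `is_domain_blacklisted` to `check_domain_blacklist_reason` change above is a small API refinement: a `bool` becomes an `Option` carrying why the domain was rejected, so callers can log distinct messages while `.is_some()` preserves the old yes/no use. A stripped-down sketch of the pattern (toy stand-ins for the real regex and DNS checks):

#[derive(Debug, Clone)]
enum DomainBlacklistReason {
    Regex,
    IP,
}

// Toy predicate: the real code consults ICON_BLACKLIST_REGEX and resolves DNS.
fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
    if domain.contains("blocked") {
        return Some(DomainBlacklistReason::Regex);
    }
    if domain == "localhost" {
        return Some(DomainBlacklistReason::IP);
    }
    None
}

fn main() {
    // The old boolean behaviour is still one call away...
    assert!(check_domain_blacklist_reason("blocked.example").is_some());

    // ...while new callers can branch on the reason, as download_icon does above.
    match check_domain_blacklist_reason("localhost") {
        Some(DomainBlacklistReason::Regex) => println!("matched ICON_BLACKLIST_REGEX"),
        Some(DomainBlacklistReason::IP) => println!("resolves to a non-global IP"),
        None => println!("allowed"),
    }
}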
@@ -14,7 +14,7 @@ use crate::{
         core::two_factor::{duo, email, email::EmailTokenData, yubikey},
         ApiResult, EmptyResult, JsonResult, JsonUpcase,
     },
-    auth::{ClientHeaders, ClientIp},
+    auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
     db::{models::*, DbConn},
     error::MapResult,
     mail, util, CONFIG,
@@ -25,7 +25,7 @@ pub fn routes() -> Vec<Route> {
 }
 
 #[post("/connect/token", data = "<data>")]
-async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn, ip: ClientIp) -> JsonResult {
+async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult {
     let data: ConnectData = data.into_inner();
 
     let mut user_uuid: Option<String> = None;
@@ -45,14 +45,18 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
             _check_is_some(&data.device_name, "device_name cannot be blank")?;
             _check_is_some(&data.device_type, "device_type cannot be blank")?;
 
-            _password_login(data, &mut user_uuid, &mut conn, &ip).await
+            _password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
         }
         "client_credentials" => {
             _check_is_some(&data.client_id, "client_id cannot be blank")?;
             _check_is_some(&data.client_secret, "client_secret cannot be blank")?;
             _check_is_some(&data.scope, "scope cannot be blank")?;
 
-            _api_key_login(data, &mut user_uuid, &mut conn, &ip).await
+            _check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
+            _check_is_some(&data.device_name, "device_name cannot be blank")?;
+            _check_is_some(&data.device_type, "device_type cannot be blank")?;
+
+            _api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
         }
         t => err!("Invalid type", t),
     };
@@ -64,14 +68,21 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
                 EventType::UserLoggedIn as i32,
                 &user_uuid,
                 client_header.device_type,
-                &ip.ip,
+                &client_header.ip.ip,
                 &mut conn,
             )
             .await;
         }
         Err(e) => {
             if let Some(ev) = e.get_event() {
-                log_user_event(ev.event as i32, &user_uuid, client_header.device_type, &ip.ip, &mut conn).await
+                log_user_event(
+                    ev.event as i32,
+                    &user_uuid,
+                    client_header.device_type,
+                    &client_header.ip.ip,
+                    &mut conn,
+                )
+                .await
             }
         }
     }
@@ -96,7 +107,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
     device.save(conn).await?;
 
-    Ok(Json(json!({
+    let result = json!({
         "access_token": access_token,
         "expires_in": expires_in,
         "token_type": "Bearer",
@@ -106,10 +117,14 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
 
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
+        "KdfMemory": user.client_kdf_memory,
+        "KdfParallelism": user.client_kdf_parallelism,
+        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
         "scope": scope,
         "unofficialServer": true,
-    })))
+    });
+
+    Ok(Json(result))
 }
 
 async fn _password_login(
@@ -130,7 +145,7 @@ async fn _password_login(
 
     // Get the user
     let username = data.username.as_ref().unwrap().trim();
-    let user = match User::find_by_mail(username, conn).await {
+    let mut user = match User::find_by_mail(username, conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
     };
@@ -150,6 +165,16 @@
         )
     }
 
+    // Change the KDF Iterations
+    if user.password_iterations != CONFIG.password_iterations() {
+        user.password_iterations = CONFIG.password_iterations();
+        user.set_password(password, None, false, None);
+
+        if let Err(e) = user.save(conn).await {
+            error!("Error updating user: {:#?}", e);
+        }
+    }
+
     // Check if the user is disabled
     if !user.enabled {
         err!(
@@ -172,7 +197,6 @@
         if resend_limit == 0 || user.login_verify_count < resend_limit {
             // We want to send another email verification if we require signups to verify
             // their email address, and we haven't sent them a reminder in a while...
-            let mut user = user;
             user.last_verifying_at = Some(now);
             user.login_verify_count += 1;
 
@@ -231,6 +255,8 @@
 
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
+        "KdfMemory": user.client_kdf_memory,
+        "KdfParallelism": user.client_kdf_parallelism,
         "ResetMasterPassword": false,// TODO: Same as above
         "scope": scope,
         "unofficialServer": true,
@@ -250,16 +276,23 @@ async fn _api_key_login(
     conn: &mut DbConn,
     ip: &ClientIp,
 ) -> JsonResult {
-    // Validate scope
-    let scope = data.scope.as_ref().unwrap();
-    if scope != "api" {
-        err!("Scope not supported")
-    }
-    let scope_vec = vec!["api".into()];
-
     // Ratelimit the login
     crate::ratelimit::check_limit_login(&ip.ip)?;
 
+    // Validate scope
+    match data.scope.as_ref().unwrap().as_ref() {
+        "api" => _user_api_key_login(data, user_uuid, conn, ip).await,
+        "api.organization" => _organization_api_key_login(data, conn, ip).await,
+        _ => err!("Scope not supported"),
+    }
+}
+
+async fn _user_api_key_login(
+    data: ConnectData,
+    user_uuid: &mut Option<String>,
+    conn: &mut DbConn,
+    ip: &ClientIp,
+) -> JsonResult {
     // Get the user via the client_id
     let client_id = data.client_id.as_ref().unwrap();
     let client_user_uuid = match client_id.strip_prefix("user.") {
@@ -316,6 +349,7 @@
     }
 
     // Common
+    let scope_vec = vec!["api".into()];
     let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
     device.save(conn).await?;
@@ -324,7 +358,7 @@
 
     // Note: No refresh_token is returned. The CLI just repeats the
     // client_credentials login flow when the existing token expires.
-    Ok(Json(json!({
+    let result = json!({
         "access_token": access_token,
         "expires_in": expires_in,
         "token_type": "Bearer",
@@ -333,8 +367,42 @@
 
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
+        "KdfMemory": user.client_kdf_memory,
+        "KdfParallelism": user.client_kdf_parallelism,
         "ResetMasterPassword": false, // TODO: Same as above
-        "scope": scope,
+        "scope": "api",
         "unofficialServer": true,
-    })))
+    });
+
+    Ok(Json(result))
+}
+
+async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult {
+    // Get the org via the client_id
+    let client_id = data.client_id.as_ref().unwrap();
+    let org_uuid = match client_id.strip_prefix("organization.") {
+        Some(uuid) => uuid,
+        None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
+    };
+    let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await {
+        Some(org_api_key) => org_api_key,
+        None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
+    };
+
+    // Check API key.
+    let client_secret = data.client_secret.as_ref().unwrap();
+    if !org_api_key.check_valid_api_key(client_secret) {
+        err!("Incorrect client_secret", format!("IP: {}. Organization: {}.", ip.ip, org_api_key.org_uuid))
+    }
+
+    let claim = generate_organization_api_key_login_claims(org_api_key.uuid, org_api_key.org_uuid);
+    let access_token = crate::auth::encode_jwt(&claim);
+
+    Ok(Json(json!({
+        "access_token": access_token,
+        "expires_in": 3600,
+        "token_type": "Bearer",
+        "scope": "api.organization",
+        "unofficialServer": true,
+    })))
+}
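Note: the `_api_key_login` split above keys off two pieces of the request: the `scope` form field picks user versus organization login, and the `client_id` prefix (`user.` / `organization.`) carries the UUID. A self-contained sketch of that dispatch (simplified signatures and error strings, not the actual handlers):

fn api_key_login(scope: &str, client_id: &str) -> Result<String, String> {
    // Dispatch on the OAuth scope, as the new _api_key_login does.
    match scope {
        "api" => user_api_key_login(client_id),
        "api.organization" => organization_api_key_login(client_id),
        _ => Err("Scope not supported".into()),
    }
}

fn user_api_key_login(client_id: &str) -> Result<String, String> {
    // client_id looks like "user.<uuid>"
    let uuid = client_id.strip_prefix("user.").ok_or("Malformed client_id")?;
    Ok(format!("user token for {uuid}"))
}

fn organization_api_key_login(client_id: &str) -> Result<String, String> {
    // client_id looks like "organization.<uuid>"
    let uuid = client_id.strip_prefix("organization.").ok_or("Malformed client_id")?;
    Ok(format!("org token for {uuid}"))
}

fn main() {
    assert!(api_key_login("api", "user.1234").is_ok());
    assert!(api_key_login("api.organization", "organization.5678").is_ok());
    assert!(api_key_login("api.push", "installation.9").is_err());
}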
@@ -3,6 +3,7 @@ pub mod core;
 mod icons;
 mod identity;
 mod notifications;
+mod push;
 mod web;
 
 use rocket::serde::json::Json;
@@ -22,6 +23,10 @@ pub use crate::api::{
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
     notifications::{start_notification_server, Notify, UpdateType},
+    push::{
+        push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
+        unregister_push_device,
+    },
     web::catchers as web_catchers,
     web::routes as web_routes,
     web::static_files,
@@ -1,17 +1,15 @@
 use std::{
-    net::SocketAddr,
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
+    net::{IpAddr, SocketAddr},
+    sync::Arc,
     time::Duration,
 };
 
-use chrono::NaiveDateTime;
-use futures::{SinkExt, StreamExt};
+use chrono::{NaiveDateTime, Utc};
 use rmpv::Value;
-use rocket::{serde::json::Json, Route};
-use serde_json::Value as JsonValue;
+use rocket::{
+    futures::{SinkExt, StreamExt},
+    Route,
+};
 use tokio::{
     net::{TcpListener, TcpStream},
     sync::mpsc::Sender,
@@ -22,56 +20,128 @@ use tokio_tungstenite::{
 };
 
 use crate::{
     api::EmptyResult,
-    auth::Headers,
-    db::models::{Cipher, Folder, Send, User},
+    auth::ClientIp,
+    db::{
+        models::{Cipher, Folder, Send as DbSend, User},
+        DbConn,
+    },
     Error, CONFIG,
 };
 
+use once_cell::sync::Lazy;
+
+static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
+    Arc::new(WebSocketUsers {
+        map: Arc::new(dashmap::DashMap::new()),
+    })
+});
+
+use super::{push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update};
+
 pub fn routes() -> Vec<Route> {
-    routes![negotiate, websockets_err]
+    routes![websockets_hub]
 }
 
-#[get("/hub")]
-fn websockets_err() -> EmptyResult {
-    static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);
+#[derive(FromForm, Debug)]
+struct WsAccessToken {
+    access_token: Option<String>,
+}
 
-    if CONFIG.websocket_enabled()
-        && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
-    {
-        err!(
-            "
-###########################################################
-'/notifications/hub' should be proxied to the websocket server or notifications won't work.
-Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
-###########################################################################################\n"
-        )
-    } else {
-        Err(Error::empty())
+struct WSEntryMapGuard {
+    users: Arc<WebSocketUsers>,
+    user_uuid: String,
+    entry_uuid: uuid::Uuid,
+    addr: IpAddr,
+}
+
+impl WSEntryMapGuard {
+    fn new(users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self {
+        Self {
+            users,
+            user_uuid,
+            entry_uuid,
+            addr,
+        }
     }
 }
 
-#[post("/hub/negotiate")]
-fn negotiate(_headers: Headers) -> Json<JsonValue> {
-    use crate::crypto;
-    use data_encoding::BASE64URL;
+impl Drop for WSEntryMapGuard {
+    fn drop(&mut self) {
+        info!("Closing WS connection from {}", self.addr);
+        if let Some(mut entry) = self.users.map.get_mut(&self.user_uuid) {
+            entry.retain(|(uuid, _)| uuid != &self.entry_uuid);
+        }
+    }
+}
 
-    let conn_id = crypto::encode_random_bytes::<16>(BASE64URL);
-    let mut available_transports: Vec<JsonValue> = Vec::new();
+#[get("/hub?<data..>")]
+fn websockets_hub<'r>(
+    ws: rocket_ws::WebSocket,
+    data: WsAccessToken,
+    ip: ClientIp,
+) -> Result<rocket_ws::Stream!['r], Error> {
+    let addr = ip.ip;
+    info!("Accepting Rocket WS connection from {addr}");
 
-    if CONFIG.websocket_enabled() {
-        available_transports.push(json!({"transport":"WebSockets", "transferFormats":["Text","Binary"]}));
-    }
+    let Some(token) = data.access_token else { err_code!("Invalid claim", 401) };
+    let Ok(claims) = crate::auth::decode_login(&token) else { err_code!("Invalid token", 401) };
+
+    let (mut rx, guard) = {
+        let users = Arc::clone(&WS_USERS);
+
+        // Add a channel to send messages to this client to the map
+        let entry_uuid = uuid::Uuid::new_v4();
+        let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
+        users.map.entry(claims.sub.clone()).or_default().push((entry_uuid, tx));
+
+        // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
+        (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr))
+    };
 
-    // TODO: Implement transports
-    // Rocket WS support: https://github.com/SergioBenitez/Rocket/issues/90
-    // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
-    // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
-    // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
-    Json(json!({
-        "connectionId": conn_id,
-        "availableTransports": available_transports
-    }))
+    Ok({
+        rocket_ws::Stream! { ws => {
+            let mut ws = ws;
+            let _guard = guard;
+            let mut interval = tokio::time::interval(Duration::from_secs(15));
+            loop {
+                tokio::select! {
+                    res = ws.next() => {
+                        match res {
+                            Some(Ok(message)) => {
+                                match message {
+                                    // Respond to any pings
+                                    Message::Ping(ping) => yield Message::Pong(ping),
+                                    Message::Pong(_) => {/* Ignored */},
+
+                                    // We should receive an initial message with the protocol and version, and we will reply to it
+                                    Message::Text(ref message) => {
+                                        let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
+
+                                        if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
+                                            yield Message::binary(INITIAL_RESPONSE);
+                                            continue;
+                                        }
+                                    }
+                                    // Just echo anything else the client sends
+                                    _ => yield message,
+                                }
+                            }
+                            _ => break,
+                        }
+                    }
+
+                    res = rx.recv() => {
+                        match res {
+                            Some(res) => yield res,
+                            None => break,
+                        }
+                    }
+
+                    _ = interval.tick() => yield Message::Ping(create_ping())
+                }
+            }
+        }}
+    })
 }
 
 //
@@ -152,8 +222,8 @@ impl WebSocketUsers {
     async fn send_update(&self, user_uuid: &str, data: &[u8]) {
         if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) {
             for (_, sender) in user.iter() {
-                if sender.send(Message::binary(data)).await.is_err() {
-                    // TODO: Delete from map here too?
+                if let Err(e) = sender.send(Message::binary(data)).await {
+                    error!("Error sending WS update {e}");
                 }
             }
         }
@@ -164,12 +234,37 @@ impl WebSocketUsers {
         let data = create_update(
             vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             ut,
+            None,
         );
 
         self.send_update(&user.uuid, &data).await;
+
+        if CONFIG.push_enabled() {
+            push_user_update(ut, user);
+        }
     }
 
-    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
+    pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
+        let data = create_update(
+            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
+            UpdateType::LogOut,
+            acting_device_uuid.clone(),
+        );
+
+        self.send_update(&user.uuid, &data).await;
+
+        if CONFIG.push_enabled() {
+            push_logout(user, acting_device_uuid);
+        }
+    }
+
+    pub async fn send_folder_update(
+        &self,
+        ut: UpdateType,
+        folder: &Folder,
+        acting_device_uuid: &String,
+        conn: &mut DbConn,
+    ) {
         let data = create_update(
             vec![
                 ("Id".into(), folder.uuid.clone().into()),
@@ -177,32 +272,67 @@ impl WebSocketUsers {
                 ("RevisionDate".into(), serialize_date(folder.updated_at)),
             ],
             ut,
+            Some(acting_device_uuid.into()),
         );
 
         self.send_update(&folder.user_uuid, &data).await;
+
+        if CONFIG.push_enabled() {
+            push_folder_update(ut, folder, acting_device_uuid, conn).await;
+        }
     }
 
-    pub async fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
-        let user_uuid = convert_option(cipher.user_uuid.clone());
+    pub async fn send_cipher_update(
+        &self,
+        ut: UpdateType,
+        cipher: &Cipher,
+        user_uuids: &[String],
+        acting_device_uuid: &String,
+        collection_uuids: Option<Vec<String>>,
+        conn: &mut DbConn,
+    ) {
         let org_uuid = convert_option(cipher.organization_uuid.clone());
+        // Depending if there are collections provided or not, we need to have different values for the following variables.
+        // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
+        let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
+            (
+                Value::Nil,
+                Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<rmpv::Value>>()),
+                serialize_date(Utc::now().naive_utc()),
+            )
+        } else {
+            (convert_option(cipher.user_uuid.clone()), Value::Nil, serialize_date(cipher.updated_at))
+        };
 
         let data = create_update(
             vec![
                 ("Id".into(), cipher.uuid.clone().into()),
                 ("UserId".into(), user_uuid),
                 ("OrganizationId".into(), org_uuid),
-                ("CollectionIds".into(), Value::Nil),
-                ("RevisionDate".into(), serialize_date(cipher.updated_at)),
+                ("CollectionIds".into(), collection_uuids),
+                ("RevisionDate".into(), revision_date),
             ],
             ut,
+            Some(acting_device_uuid.into()),
         );
 
         for uuid in user_uuids {
             self.send_update(uuid, &data).await;
         }
+
+        if CONFIG.push_enabled() && user_uuids.len() == 1 {
+            push_cipher_update(ut, cipher, acting_device_uuid, conn).await;
+        }
     }
 
-    pub async fn send_send_update(&self, ut: UpdateType, send: &Send, user_uuids: &[String]) {
+    pub async fn send_send_update(
+        &self,
+        ut: UpdateType,
+        send: &DbSend,
+        user_uuids: &[String],
+        acting_device_uuid: &String,
+        conn: &mut DbConn,
+    ) {
         let user_uuid = convert_option(send.user_uuid.clone());
 
         let data = create_update(
@@ -212,11 +342,15 @@ impl WebSocketUsers {
                 ("RevisionDate".into(), serialize_date(send.revision_date)),
             ],
             ut,
+            None,
         );
 
         for uuid in user_uuids {
             self.send_update(uuid, &data).await;
         }
+        if CONFIG.push_enabled() && user_uuids.len() == 1 {
+            push_send_update(ut, send, acting_device_uuid, conn).await;
+        }
     }
 }
 
@@ -228,14 +362,14 @@ impl WebSocketUsers {
     "ReceiveMessage", // Target
     [ // Arguments
         {
-            "ContextId": "app_id",
+            "ContextId": acting_device_uuid || Nil,
             "Type": ut as i32,
             "Payload": {}
         }
    ]
 ]
 */
-fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
+fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option<String>) -> Vec<u8> {
     use rmpv::Value as V;
 
     let value = V::Array(vec![
@@ -244,7 +378,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
         V::Nil,
         "ReceiveMessage".into(),
         V::Array(vec![V::Map(vec![
-            ("ContextId".into(), "app_id".into()),
+            ("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)),
             ("Type".into(), (ut as i32).into()),
             ("Payload".into(), payload.into()),
         ])]),
@@ -258,19 +392,19 @@ fn create_ping() -> Vec<u8> {
 }
 
 #[allow(dead_code)]
-#[derive(Eq, PartialEq)]
+#[derive(Copy, Clone, Eq, PartialEq)]
 pub enum UpdateType {
-    CipherUpdate = 0,
-    CipherCreate = 1,
-    LoginDelete = 2,
-    FolderDelete = 3,
-    Ciphers = 4,
+    SyncCipherUpdate = 0,
+    SyncCipherCreate = 1,
+    SyncLoginDelete = 2,
+    SyncFolderDelete = 3,
+    SyncCiphers = 4,
 
-    Vault = 5,
-    OrgKeys = 6,
-    FolderCreate = 7,
-    FolderUpdate = 8,
-    CipherDelete = 9,
+    SyncVault = 5,
+    SyncOrgKeys = 6,
+    SyncFolderCreate = 7,
+    SyncFolderUpdate = 8,
+    SyncCipherDelete = 9,
     SyncSettings = 10,
 
     LogOut = 11,
@@ -279,18 +413,18 @@ pub enum UpdateType {
     SyncSendUpdate = 13,
     SyncSendDelete = 14,
 
+    AuthRequest = 15,
+    AuthRequestResponse = 16,
+
     None = 100,
 }
 
-pub type Notify<'a> = &'a rocket::State<WebSocketUsers>;
-
-pub fn start_notification_server() -> WebSocketUsers {
-    let users = WebSocketUsers {
-        map: Arc::new(dashmap::DashMap::new()),
-    };
+pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
 
+pub fn start_notification_server() -> Arc<WebSocketUsers> {
+    let users = Arc::clone(&WS_USERS);
     if CONFIG.websocket_enabled() {
-        let users2 = users.clone();
+        let users2 = Arc::<WebSocketUsers>::clone(&users);
         tokio::spawn(async move {
             let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
             info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
@@ -302,7 +436,7 @@ pub fn start_notification_server() -> WebSocketUsers {
         loop {
             tokio::select! {
                 Ok((stream, addr)) = listener.accept() => {
-                    tokio::spawn(handle_connection(stream, users2.clone(), addr));
+                    tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr));
                 }
 
                 _ = &mut shutdown_rx => {
@@ -318,7 +452,7 @@ pub fn start_notification_server() -> WebSocketUsers {
     users
 }
 
-async fn handle_connection(stream: TcpStream, users: WebSocketUsers, addr: SocketAddr) -> Result<(), Error> {
+async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> {
     let mut user_uuid: Option<String> = None;
 
     info!("Accepting WS connection from {addr}");
@@ -338,30 +472,30 @@ async fn handle_connection(stream: TcpStream, users: WebSocketUsers, addr: Socke
 
     let user_uuid = user_uuid.expect("User UUID should be set after the handshake");
 
-    // Add a channel to send messages to this client to the map
-    let entry_uuid = uuid::Uuid::new_v4();
-    let (tx, mut rx) = tokio::sync::mpsc::channel(100);
-    users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
+    let (mut rx, guard) = {
+        // Add a channel to send messages to this client to the map
+        let entry_uuid = uuid::Uuid::new_v4();
+        let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
+        users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
 
+        // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
+        (rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip()))
+    };
+
+    let _guard = guard;
     let mut interval = tokio::time::interval(Duration::from_secs(15));
     loop {
        tokio::select! {
            res = stream.next() => {
                match res {
                    Some(Ok(message)) => {
-                        // Respond to any pings
-                        if let Message::Ping(ping) = message {
-                            if stream.send(Message::Pong(ping)).await.is_err() {
-                                break;
-                            }
-                            continue;
-                        } else if let Message::Pong(_) = message {
-                            /* Ignored */
-                            continue;
-                        }
+                        match message {
+                            // Respond to any pings
+                            Message::Ping(ping) => stream.send(Message::Pong(ping)).await?,
+                            Message::Pong(_) => {/* Ignored */},
 
-                        // We should receive an initial message with the protocol and version, and we will reply to it
-                        if let Message::Text(ref message) = message {
-                            let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
+                            // We should receive an initial message with the protocol and version, and we will reply to it
+                            Message::Text(ref message) => {
+                                let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
 
                             if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
@@ -369,10 +503,8 @@ async fn handle_connection(stream: TcpStream, users: WebSocketUsers, addr: Socke
                                 continue;
                             }
                         }
-
-                        // Just echo anything else the client sends
-                        if stream.send(message).await.is_err() {
-                            break;
+                            // Just echo anything else the client sends
+                            _ => stream.send(message).await?,
                         }
                     }
                     _ => break,
@@ -381,27 +513,15 @@ async fn handle_connection(stream: TcpStream, users: WebSocketUsers, addr: Socke
 
             res = rx.recv() => {
                 match res {
-                    Some(res) => {
-                        if stream.send(res).await.is_err() {
-                            break;
-                        }
-                    },
+                    Some(res) => stream.send(res).await?,
                     None => break,
                 }
             }
 
-            _= interval.tick() => {
-                if stream.send(Message::Ping(create_ping())).await.is_err() {
-                    break;
-                }
-            }
+            _ = interval.tick() => stream.send(Message::Ping(create_ping())).await?
        }
     }
 
-    info!("Closing WS connection from {addr}");
-
-    // Delete from map
-    users.map.entry(user_uuid).or_default().retain(|(uuid, _)| uuid != &entry_uuid);
     Ok(())
 }
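Note: both WebSocket paths above now rely on `WSEntryMapGuard`: the sender half goes into the per-user map, and removal happens in `Drop`, so the entry is cleaned up however the connection ends (clean close, error return via `?`, or unwinding). A minimal sketch of the same RAII idea with a plain `HashMap` (names are illustrative, not the vaultwarden types):

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

type ConnMap = Arc<Mutex<HashMap<String, Vec<u64>>>>;

struct EntryGuard {
    map: ConnMap,
    user: String,
    entry_id: u64,
}

impl Drop for EntryGuard {
    // Runs on every exit path, so the map cannot leak dead senders.
    fn drop(&mut self) {
        if let Some(entries) = self.map.lock().unwrap().get_mut(&self.user) {
            entries.retain(|id| id != &self.entry_id);
        }
        println!("closed connection {} for {}", self.entry_id, self.user);
    }
}

fn main() {
    let map: ConnMap = Arc::new(Mutex::new(HashMap::new()));
    {
        map.lock().unwrap().entry("alice".into()).or_default().push(42);
        let _guard = EntryGuard { map: Arc::clone(&map), user: "alice".into(), entry_id: 42 };
        // ... connection loop would run here ...
    } // guard dropped: entry 42 is removed even if the loop broke early
    assert!(map.lock().unwrap()["alice"].is_empty());
}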
257
src/api/push.rs
Normal file
257
src/api/push.rs
Normal file
@@ -0,0 +1,257 @@
use reqwest::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE};
use serde_json::Value;
use tokio::sync::RwLock;

use crate::{
    api::{ApiResult, EmptyResult, UpdateType},
    db::models::{Cipher, Device, Folder, Send, User},
    util::get_reqwest_client,
    CONFIG,
};

use once_cell::sync::Lazy;
use std::time::{Duration, Instant};

#[derive(Deserialize)]
struct AuthPushToken {
    access_token: String,
    expires_in: i32,
}

#[derive(Debug)]
struct LocalAuthPushToken {
    access_token: String,
    valid_until: Instant,
}

async fn get_auth_push_token() -> ApiResult<String> {
    static PUSH_TOKEN: Lazy<RwLock<LocalAuthPushToken>> = Lazy::new(|| {
        RwLock::new(LocalAuthPushToken {
            access_token: String::new(),
            valid_until: Instant::now(),
        })
    });
    let push_token = PUSH_TOKEN.read().await;

    if push_token.valid_until.saturating_duration_since(Instant::now()).as_secs() > 0 {
        debug!("Auth Push token still valid, no need for a new one");
        return Ok(push_token.access_token.clone());
    }
    drop(push_token); // Drop the read lock now

    let installation_id = CONFIG.push_installation_id();
    let client_id = format!("installation.{installation_id}");
    let client_secret = CONFIG.push_installation_key();

    let params = [
        ("grant_type", "client_credentials"),
        ("scope", "api.push"),
        ("client_id", &client_id),
        ("client_secret", &client_secret),
    ];

    let res = match get_reqwest_client().post("https://identity.bitwarden.com/connect/token").form(&params).send().await
    {
        Ok(r) => r,
        Err(e) => err!(format!("Error getting push token from bitwarden server: {e}")),
    };

    let json_pushtoken = match res.json::<AuthPushToken>().await {
        Ok(r) => r,
        Err(e) => err!(format!("Unexpected push token received from bitwarden server: {e}")),
    };

    let mut push_token = PUSH_TOKEN.write().await;
    push_token.valid_until = Instant::now()
        .checked_add(Duration::new((json_pushtoken.expires_in / 2) as u64, 0)) // Token valid for half the specified time
        .unwrap();

    push_token.access_token = json_pushtoken.access_token;

    debug!("Token still valid for {}", push_token.valid_until.saturating_duration_since(Instant::now()).as_secs());
    Ok(push_token.access_token.clone())
}

pub async fn register_push_device(user_uuid: String, device: Device) -> EmptyResult {
    if !CONFIG.push_enabled() {
        return Ok(());
    }
    let auth_push_token = get_auth_push_token().await?;

    // Data needed to register a device for push with Bitwarden:
    let data = json!({
        "userId": user_uuid,
        "deviceId": device.push_uuid,
        "identifier": device.uuid,
        "type": device.atype,
        "pushToken": device.push_token
    });

    let auth_header = format!("Bearer {}", &auth_push_token);

    get_reqwest_client()
        .post(CONFIG.push_relay_uri() + "/push/register")
        .header(CONTENT_TYPE, "application/json")
        .header(ACCEPT, "application/json")
        .header(AUTHORIZATION, auth_header)
        .json(&data)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}

pub async fn unregister_push_device(uuid: String) -> EmptyResult {
    if !CONFIG.push_enabled() {
        return Ok(());
    }
    let auth_push_token = get_auth_push_token().await?;

    let auth_header = format!("Bearer {}", &auth_push_token);

    match get_reqwest_client()
        .delete(CONFIG.push_relay_uri() + "/push/" + &uuid)
        .header(AUTHORIZATION, auth_header)
        .send()
        .await
    {
        Ok(r) => r,
        Err(e) => err!(format!("An error occurred during device unregistration: {e}")),
    };
    Ok(())
}

pub async fn push_cipher_update(
    ut: UpdateType,
    cipher: &Cipher,
    acting_device_uuid: &String,
    conn: &mut crate::db::DbConn,
) {
    // We shouldn't send a push notification on cipher update if the cipher belongs to an organization; this isn't implemented in the upstream server either.
    if cipher.organization_uuid.is_some() {
        return;
    };
    let user_uuid = match &cipher.user_uuid {
        Some(c) => c,
        None => {
            debug!("Cipher has no uuid");
            return;
        }
    };

    if Device::check_user_has_push_device(user_uuid, conn).await {
        send_to_push_relay(json!({
            "userId": user_uuid,
            "organizationId": (),
            "deviceId": acting_device_uuid,
            "identifier": acting_device_uuid,
            "type": ut as i32,
            "payload": {
                "id": cipher.uuid,
                "userId": cipher.user_uuid,
                "organizationId": (),
                "revisionDate": cipher.updated_at
            }
        }))
        .await;
    }
}

pub fn push_logout(user: &User, acting_device_uuid: Option<String>) {
    let acting_device_uuid: Value = acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| Value::Null);

    tokio::task::spawn(send_to_push_relay(json!({
        "userId": user.uuid,
        "organizationId": (),
        "deviceId": acting_device_uuid,
        "identifier": acting_device_uuid,
        "type": UpdateType::LogOut as i32,
        "payload": {
            "userId": user.uuid,
            "date": user.updated_at
        }
    })));
}

pub fn push_user_update(ut: UpdateType, user: &User) {
    tokio::task::spawn(send_to_push_relay(json!({
        "userId": user.uuid,
        "organizationId": (),
        "deviceId": (),
        "identifier": (),
        "type": ut as i32,
        "payload": {
            "userId": user.uuid,
            "date": user.updated_at
        }
    })));
}

pub async fn push_folder_update(
    ut: UpdateType,
    folder: &Folder,
    acting_device_uuid: &String,
    conn: &mut crate::db::DbConn,
) {
    if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
        tokio::task::spawn(send_to_push_relay(json!({
            "userId": folder.user_uuid,
            "organizationId": (),
            "deviceId": acting_device_uuid,
            "identifier": acting_device_uuid,
            "type": ut as i32,
            "payload": {
                "id": folder.uuid,
                "userId": folder.user_uuid,
                "revisionDate": folder.updated_at
            }
        })));
    }
}

pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_uuid: &String, conn: &mut crate::db::DbConn) {
    if let Some(s) = &send.user_uuid {
        if Device::check_user_has_push_device(s, conn).await {
            tokio::task::spawn(send_to_push_relay(json!({
                "userId": send.user_uuid,
                "organizationId": (),
                "deviceId": acting_device_uuid,
                "identifier": acting_device_uuid,
                "type": ut as i32,
                "payload": {
                    "id": send.uuid,
                    "userId": send.user_uuid,
                    "revisionDate": send.revision_date
                }
            })));
        }
    }
}

async fn send_to_push_relay(notification_data: Value) {
    if !CONFIG.push_enabled() {
        return;
    }

    let auth_push_token = match get_auth_push_token().await {
        Ok(s) => s,
        Err(e) => {
            debug!("Could not get the auth push token: {}", e);
            return;
        }
    };

    let auth_header = format!("Bearer {}", &auth_push_token);

    if let Err(e) = get_reqwest_client()
        .post(CONFIG.push_relay_uri() + "/push/send")
        .header(ACCEPT, "application/json")
        .header(CONTENT_TYPE, "application/json")
        .header(AUTHORIZATION, &auth_header)
        .json(&notification_data)
        .send()
        .await
    {
        error!("An error occurred while sending a send update to the push relay: {}", e);
    };
}
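`get_auth_push_token` above implements a read-mostly token cache: take a read lock for the fast path, drop it, then take a write lock only when refreshing, and keep the token for half its reported lifetime. A stripped-down sketch of the same pattern follows; `CachedToken` and `cached_or_fetch` are illustrative names, not vaultwarden APIs. Two tasks that both see an expired token will both fetch, which is harmless here since the last write wins:

```rust
use std::time::{Duration, Instant};
use once_cell::sync::Lazy;
use tokio::sync::RwLock;

struct CachedToken {
    value: String,
    valid_until: Instant,
}

static TOKEN: Lazy<RwLock<CachedToken>> =
    Lazy::new(|| RwLock::new(CachedToken { value: String::new(), valid_until: Instant::now() }));

async fn cached_or_fetch(fetch: impl std::future::Future<Output = (String, Duration)>) -> String {
    // Fast path: a read lock is enough while the cached token is still valid.
    {
        let t = TOKEN.read().await;
        if t.valid_until > Instant::now() {
            return t.value.clone();
        }
    } // read lock dropped here, before the write lock is taken

    let (value, ttl) = fetch.await;
    let mut t = TOKEN.write().await;
    // Keep the token for only half its lifetime, mirroring the diff above.
    t.valid_until = Instant::now() + ttl / 2;
    t.value = value;
    t.value.clone()
}

#[tokio::main]
async fn main() {
    let tok = cached_or_fetch(async { ("example-token".to_string(), Duration::from_secs(3600)) }).await;
    println!("{tok}");
}
```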
@@ -4,7 +4,8 @@ use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Htm
use serde_json::Value;

use crate::{
    api::{core::now, ApiResult},
    api::{core::now, ApiResult, EmptyResult},
    auth::decode_file_download,
    error::Error,
    util::{Cached, SafeString},
    CONFIG,
@@ -14,9 +15,9 @@ pub fn routes() -> Vec<Route> {
    // If adding more routes here, consider also adding them to
    // crate::utils::LOGGED_ROUTES to make sure they appear in the log
    if CONFIG.web_vault_enabled() {
        routes![web_index, app_id, web_files, attachments, alive, static_files]
        routes![web_index, web_index_head, app_id, web_files, attachments, alive, alive_head, static_files]
    } else {
        routes![attachments, alive, static_files]
        routes![attachments, alive, alive_head, static_files]
    }
}

@@ -43,6 +44,17 @@ async fn web_index() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
}

#[head("/")]
fn web_index_head() -> EmptyResult {
    // Add an explicit HEAD route to prevent uptime monitoring services from
    // generating "No matching routes for HEAD /" error messages.
    //
    // Rocket automatically implements a HEAD route when there's a matching GET
    // route, but relying on this behavior also means a spurious error gets
    // logged due to <https://github.com/SergioBenitez/Rocket/issues/1098>.
    Ok(())
}

#[get("/app-id.json")]
fn app_id() -> Cached<(ContentType, Json<Value>)> {
    let content_type = ContentType::new("application", "fido.trusted-apps+json");
@@ -80,8 +92,13 @@ async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true)
}

#[get("/attachments/<uuid>/<file_id>")]
async fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
#[get("/attachments/<uuid>/<file_id>?<token>")]
async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option<NamedFile> {
    let Ok(claims) = decode_file_download(&token) else { return None };
    if claims.sub != *uuid || claims.file_id != *file_id {
        return None;
    }

    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok()
}

@@ -92,9 +109,16 @@ fn alive(_conn: DbConn) -> Json<String> {
    now()
}

#[head("/alive")]
fn alive_head(_conn: DbConn) -> EmptyResult {
    // Avoid logging spurious "No matching routes for HEAD /alive" errors
    // due to <https://github.com/SergioBenitez/Rocket/issues/1098>.
    Ok(())
}

#[get("/vw_static/<filename>")]
pub fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Error> {
    match filename.as_ref() {
pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Error> {
    match filename {
        "404.png" => Ok((ContentType::PNG, include_bytes!("../static/images/404.png"))),
        "mail-github.png" => Ok((ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
        "logo-gray.png" => Ok((ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
@@ -102,14 +126,25 @@ pub fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Er
        "hibp.png" => Ok((ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
        "vaultwarden-icon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))),
        "vaultwarden-favicon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-favicon.png"))),
        "404.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/404.css"))),
        "admin.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/admin.css"))),
        "admin.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin.js"))),
        "admin_settings.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_settings.js"))),
        "admin_users.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_users.js"))),
        "admin_organizations.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_organizations.js")))
        }
        "admin_diagnostics.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_diagnostics.js")))
        }
        "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
        "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.2.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.2.slim.js")))
        "jquery-3.6.4.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.4.slim.js")))
        }
        _ => err!(format!("Static file not found: {}", filename)),
        _ => err!(format!("Static file not found: {filename}")),
    }
}
196 src/auth.rs
@@ -23,18 +23,17 @@ static JWT_DELETE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|delete", CONFI
static JWT_VERIFYEMAIL_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin()));
static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.domain_origin()));
static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));
static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));

static PRIVATE_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
});
static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
    EncodingKey::from_rsa_pem(&PRIVATE_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{}", e))
});
static PUBLIC_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
    let key =
        std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}"));
    EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
});
static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
    DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{}", e))
    let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}"));
    DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
});

pub fn load_keys() {
@@ -45,7 +44,7 @@ pub fn load_keys() {
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
        Ok(token) => token,
        Err(e) => panic!("Error encoding jwt {}", e),
        Err(e) => panic!("Error encoding jwt {e}"),
    }
}

@@ -96,6 +95,14 @@ pub fn decode_send(token: &str) -> Result<BasicJwtClaims, Error> {
    decode_jwt(token, JWT_SEND_ISSUER.to_string())
}

pub fn decode_api_org(token: &str) -> Result<OrgApiKeyLoginJwtClaims, Error> {
    decode_jwt(token, JWT_ORG_API_KEY_ISSUER.to_string())
}

pub fn decode_file_download(token: &str) -> Result<FileDownloadClaims, Error> {
    decode_jwt(token, JWT_FILE_DOWNLOAD_ISSUER.to_string())
}

#[derive(Debug, Serialize, Deserialize)]
pub struct LoginJwtClaims {
    // Not before
@@ -203,6 +210,60 @@ pub fn generate_emergency_access_invite_claims(
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct OrgApiKeyLoginJwtClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
    pub exp: i64,
    // Issuer
    pub iss: String,
    // Subject
    pub sub: String,

    pub client_id: String,
    pub client_sub: String,
    pub scope: Vec<String>,
}

pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
    let time_now = Utc::now().naive_utc();
    OrgApiKeyLoginJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::hours(1)).timestamp(),
        iss: JWT_ORG_API_KEY_ISSUER.to_string(),
        sub: uuid,
        client_id: format!("organization.{org_id}"),
        client_sub: org_id,
        scope: vec!["api.organization".into()],
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct FileDownloadClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
    pub exp: i64,
    // Issuer
    pub iss: String,
    // Subject
    pub sub: String,

    pub file_id: String,
}

pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
    let time_now = Utc::now().naive_utc();
    FileDownloadClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::minutes(5)).timestamp(),
        iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
        sub: uuid,
        file_id,
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
    // Not before
@@ -241,7 +302,7 @@ pub fn generate_admin_claims() -> BasicJwtClaims {
    let time_now = Utc::now().naive_utc();
    BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::minutes(20)).timestamp(),
        exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
        iss: JWT_ADMIN_ISSUER.to_string(),
        sub: "admin_panel".to_string(),
    }
@@ -253,7 +314,7 @@ pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::minutes(2)).timestamp(),
        iss: JWT_SEND_ISSUER.to_string(),
        sub: format!("{}/{}", send_id, file_id),
        sub: format!("{send_id}/{file_id}"),
    }
}

@@ -306,7 +367,7 @@ impl<'r> FromRequest<'r> for Host {
            ""
        };

        format!("{}://{}", protocol, host)
        format!("{protocol}://{host}")
    };

    Outcome::Success(Host {
@@ -318,6 +379,7 @@ impl<'r> FromRequest<'r> for Host {
pub struct ClientHeaders {
    pub host: String,
    pub device_type: i32,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -326,6 +388,10 @@ impl<'r> FromRequest<'r> for ClientHeaders {

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let host = try_outcome!(Host::from_request(request).await).host;
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => err_handler!("Error getting Client IP"),
        };
        // When unknown or unable to parse, return 14, which is 'Unknown Browser'
        let device_type: i32 =
            request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);
@@ -333,6 +399,7 @@ impl<'r> FromRequest<'r> for ClientHeaders {
        Outcome::Success(ClientHeaders {
            host,
            device_type,
            ip,
        })
    }
}
@@ -341,6 +408,7 @@ pub struct Headers {
    pub host: String,
    pub device: Device,
    pub user: User,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -351,6 +419,10 @@ impl<'r> FromRequest<'r> for Headers {
        let headers = request.headers();

        let host = try_outcome!(Host::from_request(request).await).host;
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => err_handler!("Error getting Client IP"),
        };

        // Get access_token
        let access_token: &str = match headers.get_one("Authorization") {
@@ -420,6 +492,7 @@ impl<'r> FromRequest<'r> for Headers {
            host,
            device,
            user,
            ip,
        })
    }
}
@@ -431,25 +504,7 @@ pub struct OrgHeaders {
    pub org_user_type: UserOrgType,
    pub org_user: UserOrganization,
    pub org_id: String,
}

// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_org_id(request: &Request<'_>) -> Option<String> {
    if let Some(Ok(org_id)) = request.param::<String>(1) {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
            return Some(org_id);
        }
    }

    if let Some(Ok(org_id)) = request.query_value::<String>("organizationId") {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
            return Some(org_id);
        }
    }

    None
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -458,7 +513,28 @@ impl<'r> FromRequest<'r> for OrgHeaders {

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(Headers::from_request(request).await);
        match get_org_id(request) {

        // org_id is usually the second path param ("/organizations/<org_id>"),
        // but there are cases where it is a query value.
        // First check the path, if this is not a valid uuid, try the query values.
        let url_org_id: Option<&str> = {
            let mut url_org_id = None;
            if let Some(Ok(org_id)) = request.param::<&str>(1) {
                if uuid::Uuid::parse_str(org_id).is_ok() {
                    url_org_id = Some(org_id);
                }
            }

            if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") {
                if uuid::Uuid::parse_str(org_id).is_ok() {
                    url_org_id = Some(org_id);
                }
            }

            url_org_id
        };

        match url_org_id {
            Some(org_id) => {
                let mut conn = match DbConn::from_request(request).await {
                    Outcome::Success(conn) => conn,
@@ -466,7 +542,7 @@ impl<'r> FromRequest<'r> for OrgHeaders {
                };

                let user = headers.user;
                let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await {
                let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await {
                    Some(user) => {
                        if user.status == UserOrgStatus::Confirmed as i32 {
                            user
@@ -490,7 +566,8 @@ impl<'r> FromRequest<'r> for OrgHeaders {
                    }
                },
                org_user,
                org_id,
                org_id: String::from(org_id),
                ip: headers.ip,
            })
        }
        _ => err_handler!("Error getting the organization id"),
@@ -504,6 +581,7 @@ pub struct AdminHeaders {
    pub user: User,
    pub org_user_type: UserOrgType,
    pub client_version: Option<String>,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -520,6 +598,7 @@ impl<'r> FromRequest<'r> for AdminHeaders {
            user: headers.user,
            org_user_type: headers.org_user_type,
            client_version,
            ip: headers.ip,
        })
    } else {
        err_handler!("You need to be Admin or Owner to call this endpoint")
@@ -533,6 +612,7 @@ impl From<AdminHeaders> for Headers {
        host: h.host,
        device: h.device,
        user: h.user,
        ip: h.ip,
    }
}
}
@@ -564,6 +644,7 @@ pub struct ManagerHeaders {
    pub device: Device,
    pub user: User,
    pub org_user_type: UserOrgType,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -580,14 +661,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
            _ => err_handler!("Error getting DB"),
        };

        if !headers.org_user.has_full_access()
            && !Collection::has_access_by_collection_and_user_uuid(
                &col_id,
                &headers.org_user.user_uuid,
                &mut conn,
            )
            .await
        {
        if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
            err_handler!("The current user isn't a manager for this collection")
        }
    }
@@ -599,6 +673,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
            device: headers.device,
            user: headers.user,
            org_user_type: headers.org_user_type,
            ip: headers.ip,
        })
    } else {
        err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
@@ -612,6 +687,7 @@ impl From<ManagerHeaders> for Headers {
        host: h.host,
        device: h.device,
        user: h.user,
        ip: h.ip,
    }
}
}
@@ -622,7 +698,9 @@ pub struct ManagerHeadersLoose {
    pub host: String,
    pub device: Device,
    pub user: User,
    pub org_user: UserOrganization,
    pub org_user_type: UserOrgType,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -636,7 +714,9 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
            host: headers.host,
            device: headers.device,
            user: headers.user,
            org_user: headers.org_user,
            org_user_type: headers.org_user_type,
            ip: headers.ip,
        })
    } else {
        err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
@@ -650,14 +730,45 @@ impl From<ManagerHeadersLoose> for Headers {
        host: h.host,
        device: h.device,
        user: h.user,
        ip: h.ip,
    }
}
}
async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
    org_user.has_full_access()
        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
}

impl ManagerHeaders {
    pub async fn from_loose(
        h: ManagerHeadersLoose,
        collections: &Vec<String>,
        conn: &mut DbConn,
    ) -> Result<ManagerHeaders, Error> {
        for col_id in collections {
            if uuid::Uuid::parse_str(col_id).is_err() {
                err!("Collection Id is malformed!");
            }
            if !can_access_collection(&h.org_user, col_id, conn).await {
                err!("You don't have access to all collections!");
            }
        }

        Ok(ManagerHeaders {
            host: h.host,
            device: h.device,
            user: h.user,
            org_user_type: h.org_user_type,
            ip: h.ip,
        })
    }
}

pub struct OwnerHeaders {
    pub host: String,
    pub device: Device,
    pub user: User,
    pub ip: ClientIp,
}

#[rocket::async_trait]
@@ -671,6 +782,7 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
    host: headers.host,
    device: headers.device,
    user: headers.user,
    ip: headers.ip,
})
} else {
    err_handler!("You need to be Owner to call this endpoint")
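The new `FileDownloadClaims` above back the `?token=` query parameter on the attachments route: a five-minute JWT scoped to one cipher and one file. Below is a self-contained sketch of issuing and checking such a token with the `jsonwebtoken` crate, using an HMAC secret instead of vaultwarden's RSA key pair purely to keep the example short:

```rust
use chrono::{Duration, Utc};
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct FileDownloadClaims {
    nbf: i64,
    exp: i64,
    iss: String,
    sub: String,     // cipher uuid
    file_id: String, // attachment id
}

fn main() {
    let now = Utc::now();
    let claims = FileDownloadClaims {
        nbf: now.timestamp(),
        exp: (now + Duration::minutes(5)).timestamp(), // same 5-minute lifetime as the diff
        iss: "example.com|file_download".to_string(),
        sub: "cipher-uuid".to_string(),
        file_id: "file-id".to_string(),
    };

    // HS256 with a shared secret; vaultwarden itself signs with its RSA key.
    let key = b"demo-secret";
    let token = encode(&Header::new(Algorithm::HS256), &claims, &EncodingKey::from_secret(key)).unwrap();

    let mut validation = Validation::new(Algorithm::HS256);
    validation.set_issuer(&["example.com|file_download"]);
    let decoded =
        decode::<FileDownloadClaims>(&token, &DecodingKey::from_secret(key), &validation).unwrap();
    assert_eq!(decoded.claims.file_id, "file-id");
}
```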
199 src/config.rs
@@ -1,3 +1,4 @@
use std::env::consts::EXE_SUFFIX;
use std::process::exit;
use std::sync::RwLock;

@@ -13,12 +14,12 @@ use crate::{

static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
    let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data"));
    get_env("CONFIG_FILE").unwrap_or_else(|| format!("{}/config.json", data_folder))
    get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
});

pub static CONFIG: Lazy<Config> = Lazy::new(|| {
    Config::load().unwrap_or_else(|e| {
        println!("Error loading config:\n\t{:?}\n", e);
        println!("Error loading config:\n {e:?}\n");
        exit(12)
    })
});
@@ -60,25 +61,37 @@ macro_rules! make_config {
impl ConfigBuilder {
    #[allow(clippy::field_reassign_with_default)]
    fn from_env() -> Self {
        match dotenvy::from_path(get_env("ENV_FILE").unwrap_or_else(|| String::from(".env"))) {
            Ok(_) => (),
        let env_file = get_env("ENV_FILE").unwrap_or_else(|| String::from(".env"));
        match dotenvy::from_path(&env_file) {
            Ok(_) => {
                println!("[INFO] Using environment file `{env_file}` for configuration.\n");
            },
            Err(e) => match e {
                dotenvy::Error::LineParse(msg, pos) => {
                    panic!("Error loading the .env file:\nNear {:?} on position {}\nPlease fix and restart!\n", msg, pos);
                    println!("[ERROR] Failed parsing environment file: `{env_file}`\nNear {msg:?} on position {pos}\nPlease fix and restart!\n");
                    exit(255);
                },
                dotenvy::Error::Io(ioerr) => match ioerr.kind() {
                    std::io::ErrorKind::NotFound => {
                        println!("[INFO] No .env file found.\n");
                        // Only exit if this environment variable is set, but the file was not found.
                        // This prevents incorrectly configured environments.
                        if let Some(env_file) = get_env::<String>("ENV_FILE") {
                            println!("[ERROR] The configured ENV_FILE `{env_file}` was not found!\n");
                            exit(255);
                        }
                    },
                    std::io::ErrorKind::PermissionDenied => {
                        println!("[WARNING] Permission Denied while trying to read the .env file!\n");
                        println!("[ERROR] Permission denied while trying to read environment file `{env_file}`!\n");
                        exit(255);
                    },
                    _ => {
                        println!("[WARNING] Reading the .env file failed:\n{:?}\n", ioerr);
                        println!("[ERROR] Reading environment file `{env_file}` failed:\n{ioerr:?}\n");
                        exit(255);
                    }
                },
                _ => {
                    println!("[WARNING] Reading the .env file failed:\n{:?}\n", e);
                    println!("[ERROR] Reading environment file `{env_file}` failed:\n{e:?}\n");
                    exit(255);
                }
            }
        };
@@ -93,6 +106,7 @@ macro_rules! make_config {

    fn from_file(path: &str) -> Result<Self, Error> {
        let config_str = std::fs::read_to_string(path)?;
        println!("[INFO] Using saved config from `{path}` for configuration.\n");
        serde_json::from_str(&config_str).map_err(Into::into)
    }

@@ -112,8 +126,8 @@ macro_rules! make_config {

    if show_overrides && !overrides.is_empty() {
        // We can't use warn! here because logging isn't setup yet.
        println!("[WARNING] The following environment variables are being overridden by the config file,");
        println!("[WARNING] please use the admin panel to make changes to them:");
        println!("[WARNING] The following environment variables are being overridden by the config.json file.");
        println!("[WARNING] Please use the admin panel to make changes to them:");
        println!("[WARNING] {}\n", overrides.join(", "));
    }

@@ -128,6 +142,8 @@ macro_rules! make_config {
    )+)+
    config.domain_set = _domain_set;

    config.domain = config.domain.trim_end_matches('/').to_string();

    config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
    config.org_creation_users = config.org_creation_users.trim().to_lowercase();

@@ -361,6 +377,16 @@ make_config! {
    /// Websocket port
    websocket_port: u16, false, def, 3012;
},
push {
    /// Enable push notifications
    push_enabled: bool, false, def, false;
    /// Push relay base uri
    push_relay_uri: String, false, def, "https://push.bitwarden.com".to_string();
    /// Installation id |> The installation id from https://bitwarden.com/host
    push_installation_id: Pass, false, def, String::new();
    /// Installation key |> The installation key from https://bitwarden.com/host
    push_installation_key: Pass, false, def, String::new();
},
jobs {
    /// Job scheduler poll interval |> How often the job scheduler thread checks for jobs to run.
    /// Set to 0 to globally disable scheduled jobs.
@@ -450,9 +476,9 @@ make_config! {
    invitation_expiration_hours: u32, false, def, 120;
    /// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
    emergency_access_allowed: bool, true, def, true;
    /// Password iterations |> Number of server-side passwords hashing iterations.
    /// The changes only apply when a user changes their password. Not recommended to lower the value
    password_iterations: i32, true, def, 100_000;
    /// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
    /// The default for new users. If changed, it will be updated during login for existing users.
    password_iterations: i32, true, def, 600_000;
    /// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
    password_hints_allowed: bool, true, def, true;
    /// Show password hint |> Controls whether a password hint should be shown directly in the web page
@@ -460,7 +486,7 @@ make_config! {
    /// provides unauthenticated access to potentially sensitive data.
    show_password_hint: bool, true, def, false;

    /// Admin page token |> The token used to authenticate in this very same page. Changing it here won't deauthorize the current session
    /// Admin token/Argon2 PHC |> The plain text token or Argon2 PHC string used to authenticate in this very same page. Changing it here will not deauthorize the current session!
    admin_token: Pass, true, option;

    /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
@@ -565,6 +591,9 @@ make_config! {
    /// Max burst size for admin login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `admin_ratelimit_seconds`
    admin_ratelimit_max_burst: u32, false, def, 3;

    /// Admin session lifetime |> Set the lifetime of admin sessions to this value (in minutes).
    admin_session_lifetime: i64, true, def, 20;

    /// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
    org_groups_enabled: bool, false, def, false;
},
@@ -584,7 +613,7 @@ make_config! {
/// Global Duo settings (Note that users can override them)
duo: _enable_duo {
    /// Enabled
    _enable_duo: bool, true, def, false;
    _enable_duo: bool, true, def, true;
    /// Integration Key
    duo_ikey: String, true, option;
    /// Secret Key
@@ -599,6 +628,10 @@ make_config! {
smtp: _enable_smtp {
    /// Enabled
    _enable_smtp: bool, true, def, true;
    /// Use Sendmail |> Whether to send mail via the `sendmail` command
    use_sendmail: bool, true, def, false;
    /// Sendmail Command |> Which sendmail command to use. The one found in the $PATH is used if not specified.
    sendmail_command: String, true, option;
    /// Host
    smtp_host: String, true, option;
    /// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY
@@ -638,7 +671,7 @@ make_config! {
/// Email 2FA Settings
email_2fa: _enable_email_2fa {
    /// Enabled |> Disabling will prevent users from setting up new email 2FA and using existing email 2FA configured
    _enable_email_2fa: bool, true, auto, |c| c._enable_smtp && c.smtp_host.is_some();
    _enable_email_2fa: bool, true, auto, |c| c._enable_smtp && (c.smtp_host.is_some() || c.use_sendmail);
    /// Email token size |> Number of digits in an email 2FA token (min: 6, max: 255). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
    email_token_size: u8, true, def, 6;
    /// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
@@ -660,9 +693,19 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    }
}

if cfg.password_iterations < 100_000 {
    err!("PASSWORD_ITERATIONS should be at least 100000 or higher. The default is 600000!");
}

let limit = 256;
if cfg.database_max_conns < 1 || cfg.database_max_conns > limit {
    err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.", limit,));
    err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {limit}.",));
}

if let Some(log_file) = &cfg.log_file {
    if std::fs::OpenOptions::new().append(true).create(true).open(log_file).is_err() {
        err!("Unable to write to log file", log_file);
    }
}

let dom = cfg.domain.to_lowercase();
@@ -691,6 +734,17 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    }
}

if cfg.push_enabled && (cfg.push_installation_id == String::new() || cfg.push_installation_key == String::new()) {
    err!(
        "Misconfigured Push Notification service\n\
        ########################################################################################\n\
        # It looks like you enabled Push Notification feature, but didn't configure it        #\n\
        # properly. Make sure the installation id and key from https://bitwarden.com/host are #\n\
        # added to your configuration.                                                         #\n\
        ########################################################################################\n"
    )
}

if cfg._enable_duo
    && (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
    && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
@@ -698,8 +752,17 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    err!("All Duo options need to be set for global Duo support")
}

if cfg._enable_yubico && cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
    err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` need to be set for Yubikey OTP support")
if cfg._enable_yubico {
    if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
        err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` must be set for Yubikey OTP support")
    }

    if let Some(yubico_server) = &cfg.yubico_server {
        let yubico_server = yubico_server.to_lowercase();
        if !yubico_server.starts_with("https://") {
            err!("`YUBICO_SERVER` must be a valid URL and start with 'https://'. Either unset this variable or provide a valid URL.")
        }
    }
}

if cfg._enable_smtp {
@@ -710,20 +773,51 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    ),
}

if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
    err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
if cfg.use_sendmail {
    let command = cfg.sendmail_command.clone().unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}"));

    let mut path = std::path::PathBuf::from(&command);

    if !path.is_absolute() {
        match which::which(&command) {
            Ok(result) => path = result,
            Err(_) => err!(format!("sendmail command {command:?} not found in $PATH")),
        }
    }

if cfg.smtp_host.is_some() && !cfg.smtp_from.contains('@') {
    err!("SMTP_FROM does not contain a mandatory @ sign")
    match path.metadata() {
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
            err!(format!("sendmail command not found at `{path:?}`"))
        }
        Err(err) => {
            err!(format!("failed to access sendmail command at `{path:?}`: {err}"))
        }
        Ok(metadata) => {
            if metadata.is_dir() {
                err!(format!("sendmail command at `{path:?}` is a directory"));
            }

            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                if !metadata.permissions().mode() & 0o111 != 0 {
                    err!(format!("sendmail command at `{path:?}` isn't executable"));
                }
            }
        }
    }
} else {
    if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
        err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support without `USE_SENDMAIL`")
    }

    if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
        err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
        err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication without `USE_SENDMAIL`")
    }
}

if cfg._enable_email_2fa && (!cfg._enable_smtp || cfg.smtp_host.is_none()) {
    err!("To enable email 2FA, SMTP must be configured")
if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !cfg.smtp_from.contains('@') {
    err!("SMTP_FROM does not contain a mandatory @ sign")
}

if cfg._enable_email_2fa && cfg.email_token_size < 6 {
@@ -731,12 +825,16 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    }
}

if cfg._enable_email_2fa && !(cfg.smtp_host.is_some() || cfg.use_sendmail) {
    err!("To enable email 2FA, a mail transport must be configured")
}

// Check if the icon blacklist regex is valid
if let Some(ref r) = cfg.icon_blacklist_regex {
    let validate_regex = regex::Regex::new(r);
    match validate_regex {
        Ok(_) => (),
        Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {:#?}", e)),
        Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")),
    }
}

@@ -746,12 +844,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    "internal" | "bitwarden" | "duckduckgo" | "google" => (),
    _ => {
        if !icon_service.starts_with("http") {
            err!(format!("Icon service URL `{}` must start with \"http\"", icon_service))
            err!(format!("Icon service URL `{icon_service}` must start with \"http\""))
        }
        match icon_service.matches("{}").count() {
            1 => (), // nominal
            0 => err!(format!("Icon service URL `{}` has no placeholder \"{{}}\"", icon_service)),
            _ => err!(format!("Icon service URL `{}` has more than one placeholder \"{{}}\"", icon_service)),
            0 => err!(format!("Icon service URL `{icon_service}` has no placeholder \"{{}}\"")),
            _ => err!(format!("Icon service URL `{icon_service}` has more than one placeholder \"{{}}\"")),
        }
    }
}
@@ -795,6 +893,23 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression")
}

if !cfg.disable_admin_token {
    match cfg.admin_token.as_ref() {
        Some(t) if t.starts_with("$argon2") => {
            if let Err(e) = argon2::password_hash::PasswordHash::new(t) {
                err!(format!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: '{e}'"))
            }
        }
        Some(_) => {
            println!(
                "[NOTICE] You are using a plain text `ADMIN_TOKEN` which is insecure.\n\
                Please generate a secure Argon2 PHC string by using `vaultwarden hash` or `argon2`.\n\
                See: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token\n"
            );
        }
        _ => {}
    }
}
Ok(())
}

@@ -803,7 +918,7 @@ fn extract_url_origin(url: &str) -> String {
match Url::parse(url) {
    Ok(u) => u.origin().ascii_serialization(),
    Err(e) => {
        println!("Error validating domain: {}", e);
        println!("Error validating domain: {e}");
        String::new()
    }
}
@@ -1011,7 +1126,7 @@ impl Config {
}
pub fn mail_enabled(&self) -> bool {
    let inner = &self.inner.read().unwrap().config;
    inner._enable_smtp && inner.smtp_host.is_some()
    inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
}

pub fn get_duo_akey(&self) -> String {
@@ -1086,6 +1201,7 @@ where
// Register helpers
hb.register_helper("case", Box::new(case_helper));
hb.register_helper("jsesc", Box::new(js_escape_helper));
hb.register_helper("to_json", Box::new(to_json));

macro_rules! reg {
    ($name:expr) => {{
@@ -1103,6 +1219,7 @@ where
reg!("email/email_footer");
reg!("email/email_footer_text");

reg!("email/admin_reset_password", ".html");
reg!("email/change_email", ".html");
reg!("email/delete_account", ".html");
reg!("email/emergency_access_invite_accepted", ".html");
@@ -1177,9 +1294,23 @@ fn js_escape_helper<'reg, 'rc>(

let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
if !no_quote {
    escaped_value = format!("&quot;{}&quot;", escaped_value);
    escaped_value = format!("&quot;{escaped_value}&quot;");
}

out.write(&escaped_value)?;
Ok(())
}

fn to_json<'reg, 'rc>(
    h: &Helper<'reg, 'rc>,
    _r: &'reg Handlebars<'_>,
    _ctx: &'rc Context,
    _rc: &mut RenderContext<'reg, 'rc>,
    out: &mut dyn Output,
) -> HelperResult {
    let param = h.param(0).ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))?.value();
    let json = serde_json::to_string(param)
        .map_err(|e| RenderError::new(format!("Can't serialize parameter to JSON: {e}")))?;
    out.write(&json)?;
    Ok(())
}
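The `ADMIN_TOKEN` check above only verifies that a value starting with `$argon2` parses as a PHC hash. Below is a hedged sketch of producing and verifying such a PHC string with the `argon2` crate (the same crate the validation parses with); `vaultwarden hash` is the documented way to generate one, this only illustrates the format:

```rust
use argon2::{
    password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};

fn main() {
    // Hash a secret into a PHC string, e.g. "$argon2id$v=19$m=19456,t=2,p=1$...".
    let salt = SaltString::generate(&mut OsRng);
    let phc = Argon2::default()
        .hash_password(b"my-admin-token", &salt)
        .unwrap()
        .to_string();

    // This parse is the same check `validate_config` performs on `ADMIN_TOKEN`.
    let parsed = PasswordHash::new(&phc).expect("valid PHC string");
    assert!(Argon2::default().verify_password(b"my-admin-token", &parsed).is_ok());
    println!("{phc}");
}
```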
@@ -75,12 +75,10 @@ macro_rules! generate_connections {
#[cfg($name)]
impl CustomizeConnection<$ty, diesel::r2d2::Error> for DbConnOptions {
    fn on_acquire(&self, conn: &mut $ty) -> Result<(), diesel::r2d2::Error> {
        (|| {
        if !self.init_stmts.is_empty() {
            conn.batch_execute(&self.init_stmts)?;
            conn.batch_execute(&self.init_stmts).map_err(diesel::r2d2::Error::QueryError)?;
        }
        Ok(())
        })().map_err(diesel::r2d2::Error::QueryError)
    }
})+

@@ -97,7 +95,7 @@ macro_rules! generate_connections {

impl Drop for DbConn {
    fn drop(&mut self) {
        let conn = self.conn.clone();
        let conn = Arc::clone(&self.conn);
        let permit = self.permit.take();

        // Since connection can't be on the stack in an async fn during an
@@ -143,21 +141,20 @@ macro_rules! generate_connections {
        }))
        .build(manager)
        .map_res("Failed to create pool")?;
        return Ok(DbPool {
        Ok(DbPool {
            pool: Some(DbPoolInner::$name(pool)),
            semaphore: Arc::new(Semaphore::new(CONFIG.database_max_conns() as usize)),
        });
        })
    }
    #[cfg(not($name))]
    #[allow(unreachable_code)]
    return unreachable!("Trying to use a DB backend when its feature is disabled");
    unreachable!("Trying to use a DB backend when its feature is disabled")
},
)+ }
}
// Get a connection from the pool
pub async fn get(&self) -> Result<DbConn, Error> {
    let duration = Duration::from_secs(CONFIG.database_timeout());
    let permit = match timeout(duration, self.semaphore.clone().acquire_owned()).await {
    let permit = match timeout(duration, Arc::clone(&self.semaphore).acquire_owned()).await {
        Ok(p) => p.expect("Semaphore should be open"),
        Err(_) => {
            err!("Timeout waiting for database connection");
@@ -170,10 +167,10 @@ macro_rules! generate_connections {
    let pool = p.clone();
    let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?;

    return Ok(DbConn {
    Ok(DbConn {
        conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))),
        permit: Some(permit)
    });
    })
},
)+ }
}
@@ -383,7 +380,7 @@ pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
    let db_url = CONFIG.database_url();
    let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
    let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
    diesel::sql_query(format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)).execute(conn)?;
    diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?;
    Ok(())
}
}
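Two of the changes above swap `x.clone()` for `Arc::clone(&x)`. The behavior is identical, a reference-count bump rather than a deep copy; the explicit form just makes that unmistakable at the call site. A tiny illustration:

```rust
use std::sync::Arc;

fn main() {
    let conn = Arc::new(String::from("db-connection"));
    let for_task = Arc::clone(&conn); // same allocation, refcount is now 2
    assert_eq!(Arc::strong_count(&conn), 2);
    assert!(Arc::ptr_eq(&conn, &for_task)); // both handles point at the same data
}
```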
@@ -35,7 +35,8 @@ impl Attachment {
}

pub fn get_url(&self, host: &str) -> String {
    format!("{}/attachments/{}/{}", host, self.cipher_uuid, self.id)
    let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
    format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token)
}

pub fn to_json(&self, host: &str) -> Value {
@@ -51,6 +52,7 @@ impl Attachment {
}
}

use crate::auth::{encode_jwt, generate_file_download_claims};
use crate::db::DbConn;

use crate::api::EmptyResult;
@@ -187,10 +189,15 @@ impl Attachment {
}}
}

pub async fn find_all_by_ciphers(cipher_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
// This will return all attachments linked to the user or org.
// No filtering is done here on whether the user actually has access!
// It is used to speed up the sync process, and the matching is done in a different part.
pub async fn find_all_by_user_and_orgs(user_uuid: &str, org_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
    attachments::table
        .filter(attachments::cipher_uuid.eq_any(cipher_uuids))
        .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
        .filter(ciphers::user_uuid.eq(user_uuid))
        .or_filter(ciphers::organization_uuid.eq_any(org_uuids))
        .select(attachments::all_columns)
        .load::<AttachmentDb>(conn)
        .expect("Error loading attachments")
@@ -6,7 +6,7 @@ use super::{
Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
};

use crate::api::core::CipherSyncData;
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};

use std::borrow::Cow;

@@ -27,7 +27,8 @@ db_object! {
Login = 1,
SecureNote = 2,
Card = 3,
Identity = 4
Identity = 4,
Fido2key = 5
*/
pub atype: i32,
pub name: String,
@@ -73,6 +74,33 @@ impl Cipher {
    reprompt: None,
}
}

pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
    let mut validation_errors = serde_json::Map::new();
    for (index, cipher) in cipher_data.iter().enumerate() {
        if let Some(note) = &cipher.Notes {
            if note.len() > 10_000 {
                validation_errors.insert(
                    format!("Ciphers[{index}].Notes"),
                    serde_json::to_value([
                        "The field Notes exceeds the maximum encrypted value length of 10000 characters.",
                    ])
                    .unwrap(),
                );
            }
        }
    }
    if !validation_errors.is_empty() {
        let err_json = json!({
            "message": "The model state is invalid.",
            "validationErrors" : validation_errors,
            "object": "error"
        });
        err_json!(err_json, "Import validation errors")
    } else {
        Ok(())
    }
}
}

use crate::db::DbConn;
@@ -87,6 +115,7 @@ impl Cipher {
host: &str,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
sync_type: CipherSyncType,
conn: &mut DbConn,
) -> Value {
use crate::util::format_date;
@@ -107,17 +136,24 @@ impl Cipher {
let password_history_json =
    self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
// We don't need these values at all for Organizational syncs
// Skip any other database calls if this is the case and just return false.
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
    match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
        Some((ro, hp)) => (ro, hp),
        None => {
            error!("Cipher ownership assertion failure");
            (true, true)
        }
    }
} else {
    (false, false)
};

// Get the type_data or a default to an empty json object '{}'.
// If not passing an empty object, mobile clients will crash.
let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({}));
let mut type_data_json: Value =
    serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));

// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// Set the first element of the Uris array as Uri, this is needed by several (mobile) clients.
@@ -136,10 +172,10 @@ impl Cipher {

// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// data_json should always contain the following keys with every atype
data_json["Fields"] = json!(fields_json);
data_json["Fields"] = fields_json.clone();
data_json["Name"] = json!(self.name);
data_json["Notes"] = json!(self.notes);
data_json["PasswordHistory"] = json!(password_history_json);
data_json["PasswordHistory"] = password_history_json.clone();

let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
    if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
@@ -165,8 +201,6 @@ impl Cipher {
"CreationDate": format_date(&self.created_at),
"RevisionDate": format_date(&self.updated_at),
"DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
"FolderId": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string() ) } else { self.get_folder_uuid(user_uuid, conn).await },
"Favorite": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_favorites.contains(&self.uuid) } else { self.is_favorite(user_uuid, conn).await },
"Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"OrganizationId": self.organization_uuid,
"Attachments": attachments_json,
@@ -183,12 +217,6 @@ impl Cipher {

"Data": data_json,

// These values are true by default, but can be false if the
// cipher belongs to a collection where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user.
"Edit": !read_only,
"ViewPassword": !hide_passwords,

"PasswordHistory": password_history_json,

// All Cipher types are included by default as null, but only the matching one will be populated
@@ -196,13 +224,36 @@ impl Cipher {
"SecureNote": null,
"Card": null,
"Identity": null,
"Fido2Key": null,
});

// These values are only needed for user/default syncs
// Not during an organizational sync like `get_org_details`
// Skip adding these fields in that case
if sync_type == CipherSyncType::User {
    json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
        cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
    } else {
        self.get_folder_uuid(user_uuid, conn).await
    });
    json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
        cipher_sync_data.cipher_favorites.contains(&self.uuid)
    } else {
        self.is_favorite(user_uuid, conn).await
    });
    // These values are true by default, but can be false if the
    // cipher belongs to a collection or group where the org owner has enabled
    // the "Read Only" or "Hide Passwords" restrictions for the user.
    json_object["Edit"] = json!(!read_only);
    json_object["ViewPassword"] = json!(!hide_passwords);
}

let key = match self.atype {
    1 => "Login",
    2 => "SecureNote",
    3 => "Card",
    4 => "Identity",
    5 => "Fido2Key",
    _ => panic!("Wrong type"),
};

@@ -713,6 +764,7 @@ impl Cipher {
.or_filter(groups::access_all.eq(true)) //Access via group
.or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group
.select(ciphers_collections::all_columns)
.distinct()
.load::<(String, String)>(conn).unwrap_or_default()
}}
}
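The new `validate_notes` above rejects imports whose notes exceed 10,000 characters, reporting them in the `validationErrors` shape Bitwarden clients expect. A standalone sketch of the same check follows; the free function and its signature are illustrative, not the vaultwarden API:

```rust
use serde_json::{json, Map, Value};

// Collect per-index errors into the `validationErrors` object, mirroring
// the `Ciphers[{index}].Notes` keys built in the diff above.
fn validate_notes(notes: &[Option<String>]) -> Result<(), Value> {
    let mut validation_errors = Map::new();
    for (index, note) in notes.iter().enumerate() {
        if let Some(note) = note {
            if note.len() > 10_000 {
                validation_errors.insert(
                    format!("Ciphers[{index}].Notes"),
                    json!(["The field Notes exceeds the maximum encrypted value length of 10000 characters."]),
                );
            }
        }
    }
    if validation_errors.is_empty() {
        Ok(())
    } else {
        Err(json!({
            "message": "The model state is invalid.",
            "validationErrors": validation_errors,
            "object": "error"
        }))
    }
}

fn main() {
    let too_long = "a".repeat(10_001);
    let res = validate_notes(&[None, Some(too_long)]);
    assert!(res.is_err());
    println!("{}", res.unwrap_err());
}
```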
@@ -10,6 +10,7 @@ db_object! {
|
||||
pub uuid: String,
|
||||
pub org_uuid: String,
|
||||
pub name: String,
|
||||
pub external_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Identifiable, Queryable, Insertable)]
|
||||
@@ -33,18 +34,21 @@ db_object! {

 /// Local methods
 impl Collection {
-    pub fn new(org_uuid: String, name: String) -> Self {
-        Self {
+    pub fn new(org_uuid: String, name: String, external_id: Option<String>) -> Self {
+        let mut new_model = Self {
             uuid: crate::util::get_uuid(),
             org_uuid,
             name,
-        }
+            external_id: None,
+        };
+
+        new_model.set_external_id(external_id);
+        new_model
     }

     pub fn to_json(&self) -> Value {
         json!({
-            "ExternalId": null, // Not support by us
+            "ExternalId": self.external_id,
             "Id": self.uuid,
             "OrganizationId": self.org_uuid,
             "Name": self.name,
@@ -52,6 +56,21 @@ impl Collection {
         })
     }

+    pub fn set_external_id(&mut self, external_id: Option<String>) {
+        //Check if external id is empty. We don't want to have
+        //empty strings in the database
+        match external_id {
+            Some(external_id) => {
+                if external_id.is_empty() {
+                    self.external_id = None;
+                } else {
+                    self.external_id = Some(external_id)
+                }
+            }
+            None => self.external_id = None,
+        }
+    }
+
     pub async fn to_json_details(
         &self,
         user_uuid: &str,
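`set_external_id` normalizes empty strings to `None` so that `""` never reaches the database. A standalone sketch of the same normalization (the struct is a stand-in for vaultwarden's `Collection`, and the sample id value is hypothetical):

```rust
// Stand-in struct; only the normalization logic mirrors the diff.
struct Collection {
    external_id: Option<String>,
}

impl Collection {
    fn set_external_id(&mut self, external_id: Option<String>) {
        // Empty strings are coerced to None so the column stores NULL, not "".
        self.external_id = match external_id {
            Some(id) if id.is_empty() => None,
            other => other,
        };
    }
}

fn main() {
    let mut c = Collection { external_id: None };
    c.set_external_id(Some(String::new()));
    assert_eq!(c.external_id, None); // "" becomes NULL
    c.set_external_id(Some("sso-group-42".into())); // hypothetical id
    assert_eq!(c.external_id.as_deref(), Some("sso-group-42"));
}
```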
@@ -64,6 +83,8 @@ impl Collection {
             Some(_) => {
                 if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
                     (uc.read_only, uc.hide_passwords)
+                } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
+                    (cg.read_only, cg.hide_passwords)
                 } else {
                     (false, false)
                 }
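The two added lines give group-level permissions a place in the lookup order: a direct `user_collections` entry wins, a `user_collections_groups` entry is consulted next, and anything else (e.g. a collection reachable via `access_all`) falls back to unrestricted `(false, false)`. A stand-in sketch of that precedence (types and maps are assumptions, not vaultwarden's `CipherSyncData`):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Perm {
    read_only: bool,
    hide_passwords: bool,
}

// Direct per-user grant first, per-group grant second, unrestricted fallback.
fn resolve(
    uuid: &str,
    user_collections: &HashMap<String, Perm>,
    user_collections_groups: &HashMap<String, Perm>,
) -> (bool, bool) {
    if let Some(uc) = user_collections.get(uuid) {
        (uc.read_only, uc.hide_passwords)
    } else if let Some(cg) = user_collections_groups.get(uuid) {
        (cg.read_only, cg.hide_passwords)
    } else {
        (false, false) // e.g. reachable via access_all: no restrictions
    }
}

fn main() {
    let direct = HashMap::from([("c1".to_string(), Perm { read_only: true, hide_passwords: false })]);
    let via_group: HashMap<String, Perm> = HashMap::new();
    assert_eq!(resolve("c1", &direct, &via_group), (true, false));
    assert_eq!(resolve("c2", &direct, &via_group), (false, false));
}
```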
@@ -232,6 +253,17 @@ impl Collection {
         }}
     }

+    pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
+        db_run! { conn: {
+            collections::table
+                .filter(collections::org_uuid.eq(org_uuid))
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0)
+        }}
+    }
+
     pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             collections::table
@@ -287,48 +319,96 @@ impl Collection {
     }

     pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
         match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
             None => false, // Not in Org
             Some(user_org) => {
                 if user_org.has_full_access() {
                     return true;
                 }

                 let user_uuid = user_uuid.to_string();
                 db_run! { conn: {
-                    users_collections::table
-                        .filter(users_collections::collection_uuid.eq(&self.uuid))
-                        .filter(users_collections::user_uuid.eq(user_uuid))
-                        .filter(users_collections::read_only.eq(false))
+                    collections::table
+                        .left_join(users_collections::table.on(
+                            users_collections::collection_uuid.eq(collections::uuid).and(
+                                users_collections::user_uuid.eq(user_uuid.clone())
+                            )
+                        ))
+                        .left_join(users_organizations::table.on(
+                            collections::org_uuid.eq(users_organizations::org_uuid).and(
+                                users_organizations::user_uuid.eq(user_uuid)
+                            )
+                        ))
+                        .left_join(groups_users::table.on(
+                            groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                        ))
+                        .left_join(groups::table.on(
+                            groups::uuid.eq(groups_users::groups_uuid)
+                        ))
+                        .left_join(collections_groups::table.on(
+                            collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
+                                collections_groups::collections_uuid.eq(collections::uuid)
+                            )
+                        ))
+                        .filter(collections::uuid.eq(&self.uuid))
+                        .filter(
+                            users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or(// Directly accessed collection
+                            users_organizations::access_all.eq(true).or( // access_all in Organization
+                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                            )).or(
+                            groups::access_all.eq(true) // access_all in groups
+                            ).or( // access via groups
+                            groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
+                                collections_groups::collections_uuid.is_not_null().and(
+                                collections_groups::read_only.eq(false))
+                            )
+                            )
+                        )
                         .count()
                         .first::<i64>(conn)
                         .ok()
                         .unwrap_or(0) != 0
                 }}
             }
         }
     }

     pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
         match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
             None => true, // Not in Org
             Some(user_org) => {
                 if user_org.has_full_access() {
                     return false;
                 }

                 let user_uuid = user_uuid.to_string();
                 db_run! { conn: {
-                    users_collections::table
-                        .filter(users_collections::collection_uuid.eq(&self.uuid))
-                        .filter(users_collections::user_uuid.eq(user_uuid))
-                        .filter(users_collections::hide_passwords.eq(true))
+                    collections::table
+                        .left_join(users_collections::table.on(
+                            users_collections::collection_uuid.eq(collections::uuid).and(
+                                users_collections::user_uuid.eq(user_uuid.clone())
+                            )
+                        ))
+                        .left_join(users_organizations::table.on(
+                            collections::org_uuid.eq(users_organizations::org_uuid).and(
+                                users_organizations::user_uuid.eq(user_uuid)
+                            )
+                        ))
+                        .left_join(groups_users::table.on(
+                            groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                        ))
+                        .left_join(groups::table.on(
+                            groups::uuid.eq(groups_users::groups_uuid)
+                        ))
+                        .left_join(collections_groups::table.on(
+                            collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
+                                collections_groups::collections_uuid.eq(collections::uuid)
+                            )
+                        ))
+                        .filter(collections::uuid.eq(&self.uuid))
+                        .filter(
+                            users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection
+                            users_organizations::access_all.eq(true).or( // access_all in Organization
+                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                            )).or(
+                            groups::access_all.eq(true) // access_all in groups
+                            ).or( // access via groups
+                            groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
+                                collections_groups::collections_uuid.is_not_null().and(
+                                collections_groups::hide_passwords.eq(true))
+                            )
+                            )
+                        )
                         .count()
                         .first::<i64>(conn)
                         .ok()
                         .unwrap_or(0) != 0
                 }}
             }
         }
     }
 }

 /// Database methods
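Both rewritten queries encode the same access rule: after full-access members are short-circuited, a collection is reachable when any one of five grants applies — a direct assignment, organization-wide `access_all`, admin/owner rank, group-wide `access_all`, or an explicit group-to-collection mapping — with the per-row `read_only`/`hide_passwords` flag consulted where it exists. A pure-Rust model of the writability variant (names are assumptions, not vaultwarden's API):

```rust
// One grant suffices; row-level grants carry their own read_only flag.
struct Access {
    direct_read_only: Option<bool>, // users_collections row for this user, if any
    org_access_all: bool,           // users_organizations::access_all
    is_admin_or_owner: bool,        // users_organizations::atype <= Admin
    group_access_all: bool,         // groups::access_all via group membership
    group_read_only: Option<bool>,  // collections_groups row via a group, if any
}

fn is_writable(a: &Access) -> bool {
    matches!(a.direct_read_only, Some(false)) // directly assigned, not read-only
        || a.org_access_all                   // org-wide access_all
        || a.is_admin_or_owner                // admins/owners always write
        || a.group_access_all                 // group-wide access_all
        || matches!(a.group_read_only, Some(false)) // group mapping, not read-only
}

fn main() {
    // Reachable only through a writable group-to-collection mapping.
    let via_group = Access {
        direct_read_only: None,
        org_access_all: false,
        is_admin_or_owner: false,
        group_access_all: false,
        group_read_only: Some(false),
    };
    assert!(is_writable(&via_group));
}
```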
@@ -346,6 +426,19 @@ impl CollectionUser {
         }}
     }

+    pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_collections::table
+                .inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
+                .filter(collections::org_uuid.eq(org_uuid))
+                .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
+                .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
+                .load::<CollectionUserDb>(conn)
+                .expect("Error loading users_collections")
+                .from_db()
+        }}
+    }
+
     pub async fn save(
         user_uuid: &str,
         collection_uuid: &str,
@@ -429,6 +522,21 @@ impl CollectionUser {
         }}
     }

+    pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid(
+        collection_uuid: &str,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
+        db_run! { conn: {
+            users_collections::table
+                .filter(users_collections::collection_uuid.eq(collection_uuid))
+                .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
+                .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
+                .load::<CollectionUserDb>(conn)
+                .expect("Error loading users_collections")
+                .from_db()
+        }}
+    }
+
     pub async fn find_by_collection_and_user(
         collection_uuid: &str,
         user_uuid: &str,
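`find_by_organization` and `find_by_collection_swap_user_uuid_with_org_user_uuid` both `select` `users_organizations::uuid` into the tuple slot that normally holds the account uuid, so callers receive the org-membership id instead of the global user id. Stand-in types making that swap explicit (hypothetical, for illustration only):

```rust
// The returned row carries the membership uuid in the user slot.
struct UserOrganization {
    uuid: String,      // membership uuid (one per user, per org)
    user_uuid: String, // account uuid
}

struct CollectionUser {
    user_uuid: String, // here: filled with UserOrganization::uuid instead
    collection_uuid: String,
    read_only: bool,
    hide_passwords: bool,
}

fn swap(member: &UserOrganization, collection_uuid: &str) -> CollectionUser {
    CollectionUser {
        user_uuid: member.uuid.clone(), // the swap: membership uuid, not account uuid
        collection_uuid: collection_uuid.to_string(),
        read_only: false,
        hide_passwords: false,
    }
}

fn main() {
    let m = UserOrganization { uuid: "org-user-1".into(), user_uuid: "acct-9".into() };
    let cu = swap(&m, "col-1");
    assert_eq!(cu.user_uuid, "org-user-1");
}
```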
@@ -1,6 +1,6 @@
 use chrono::{NaiveDateTime, Utc};

-use crate::CONFIG;
+use crate::{crypto, CONFIG};

 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -16,6 +16,7 @@ db_object! {
         pub name: String,
         pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
+        pub push_uuid: Option<String>,
         pub push_token: Option<String>,

         pub refresh_token: String,
@@ -38,6 +39,7 @@ impl Device {
             name,
             atype,

+            push_uuid: None,
             push_token: None,
             refresh_token: String::new(),
             twofactor_remember: None,
@@ -45,9 +47,7 @@ impl Device {
     }

     pub fn refresh_twofactor_remember(&mut self) -> String {
-        use crate::crypto;
         use data_encoding::BASE64;

         let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64);
         self.twofactor_remember = Some(twofactor_remember.clone());
@@ -66,9 +66,7 @@ impl Device {
     ) -> (String, i64) {
         // If there is no refresh token, we create one
         if self.refresh_token.is_empty() {
-            use crate::crypto;
             use data_encoding::BASE64URL;

             self.refresh_token = crypto::encode_random_bytes::<64>(BASE64URL);
         }
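With `crypto` imported once at the top of the module, both token helpers call `crypto::encode_random_bytes::<N>` directly instead of importing it locally. The real implementation lives in vaultwarden's crypto module; a plausible stand-in built on the `rand` and `data_encoding` crates could look like this (an assumption-based sketch, not the project's code):

```rust
// Assumed re-implementation: N random bytes, encoded with the given encoding.
use data_encoding::{Encoding, BASE64};
use rand::RngCore;

fn encode_random_bytes<const N: usize>(enc: Encoding) -> String {
    let mut bytes = [0u8; N];
    // Fill the buffer from a CSPRNG; session tokens must be unpredictable.
    rand::thread_rng().fill_bytes(&mut bytes);
    enc.encode(&bytes)
}

fn main() {
    // 180 random bytes, BASE64-encoded, as for the two-factor remember token.
    let token = encode_random_bytes::<180>(BASE64);
    println!("{token}");
}
```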
@@ -155,6 +153,35 @@ impl Device {
         }}
     }

+    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            devices::table
+                .filter(devices::user_uuid.eq(user_uuid))
+                .load::<DeviceDb>(conn)
+                .expect("Error loading devices")
+                .from_db()
+        }}
+    }
+
+    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! { conn: {
+            devices::table
+                .filter(devices::uuid.eq(uuid))
+                .first::<DeviceDb>(conn)
+                .ok()
+                .from_db()
+        }}
+    }
+
+    pub async fn clear_push_token_by_uuid(uuid: &str, conn: &mut DbConn) -> EmptyResult {
+        db_run! { conn: {
+            diesel::update(devices::table)
+                .filter(devices::uuid.eq(uuid))
+                .set(devices::push_token.eq::<Option<String>>(None))
+                .execute(conn)
+                .map_res("Error removing push token")
+        }}
+    }
     pub async fn find_by_refresh_token(refresh_token: &str, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             devices::table
@@ -175,4 +202,26 @@ impl Device {
                 .from_db()
         }}
     }
+    pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            devices::table
+                .filter(devices::user_uuid.eq(user_uuid))
+                .filter(devices::push_token.is_not_null())
+                .load::<DeviceDb>(conn)
+                .expect("Error loading push devices")
+                .from_db()
+        }}
+    }
+
+    pub async fn check_user_has_push_device(user_uuid: &str, conn: &mut DbConn) -> bool {
+        db_run! { conn: {
+            devices::table
+                .filter(devices::user_uuid.eq(user_uuid))
+                .filter(devices::push_token.is_not_null())
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0) != 0
+        }}
+    }
 }
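`check_user_has_push_device` answers an existence question with a `COUNT(...) != 0`. The same decision expressed over an in-memory list (a hypothetical `Device` stand-in, not the Diesel model):

```rust
// Stand-in for the COUNT-based existence check in check_user_has_push_device.
struct Device {
    user_uuid: String,
    push_token: Option<String>,
}

fn check_user_has_push_device(user_uuid: &str, devices: &[Device]) -> bool {
    // Equivalent to: COUNT rows WHERE user_uuid matches AND push_token IS NOT NULL, then != 0.
    devices
        .iter()
        .filter(|d| d.user_uuid == user_uuid && d.push_token.is_some())
        .count()
        != 0
}

fn main() {
    let devices = vec![
        Device { user_uuid: "u1".into(), push_token: None },
        Device { user_uuid: "u1".into(), push_token: Some("fcm-token".into()) }, // hypothetical token
    ];
    assert!(check_user_has_push_device("u1", &devices));
    assert!(!check_user_has_push_device("u2", &devices));
}
```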
Some files were not shown because too many files have changed in this diff.