Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2025-09-10 10:45:57 +03:00

Compare commits: 49 commits

48836501bf  f863ffb89a  03c6ed2e07  efc6eb0073  cec1e87679  512b3b9b7c  93da5091e6
915496c103  ecb31c85d6  d722328f05  cb4b683dcd  6eaf131922  8933ac2ee7  6822e445bb
18fbc1ccf6  4861f6decc  b435ee49ad  193f86e43e  66a7baa67c  18d66474e0  ff8db4fd78
b2f9af718e  198fd2fc1d  ec8a9c82df  ef5e0bd4e5  30b408eaa9  e205e3b7db  ca1a9e26d8
f3a1385aee  008a2cf298  f0c9a7fbc3  9162b13123  480bf9b0c1  f96c5e8a1e  3d4be24902
bf41d74501  01e33a4919  bc26bfa589  ccc51e7580  99a59bc4f3  a77482575a  bbd630f1ee
d18b793c71  d3a1d875d5  d6e0ace192  60cbfa59bf  5ab7010c37  ad2cfd8b97  32543c46da
@@ -97,6 +97,10 @@
 ## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
 # ORG_EVENTS_ENABLED=false

+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
+
 ## Number of days to retain events stored in the database.
 ## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
 # EVENTS_DAYS_RETAIN=
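Taken together, these are plain environment variables; a minimal sketch of a deployment env file that enables organization events and locks email changes (the values shown are illustrative choices, not defaults from this diff):

    # Sketch: enable org events and disable email changes for all users.
    # EVENTS_DAYS_RETAIN must be set or the cleanup job stays disabled.
    ORG_EVENTS_ENABLED=true
    EVENTS_DAYS_RETAIN=90
    EMAIL_CHANGE_ALLOWED=false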
.github/workflows/build.yml (76 lines changed)

@@ -8,9 +8,11 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -18,9 +20,11 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"

 jobs:
   build:
@@ -30,7 +34,6 @@ jobs:
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
-      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
     strategy:
       fail-fast: false
       matrix:
@@ -43,7 +46,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
       # End Checkout the repo


@@ -59,7 +62,7 @@ jobs:
         shell: bash
         run: |
           if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
-            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+            RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
           elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
             RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
           else
@@ -71,7 +74,7 @@ jobs:

       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -81,22 +84,19 @@ jobs:

       # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used

-      # Enable Rust Caching
-      - uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1
-        with:
-          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
-          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
-          # Only update when really needed! Use a <year>.<month>[.<inc>] format.
-          prefix-key: "v2023.07-rust"
-      # End Enable Rust Caching
+      # Set the current matrix toolchain version as default
+      - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
+        run: |
+          # Remove the rust-toolchain.toml
+          rm rust-toolchain.toml
+          # Set the default
+          rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}

       # Show environment
       - name: "Show environment"
@@ -105,47 +105,55 @@ jobs:
           cargo -vV
       # End Show environment

+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
+        with:
+          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
+          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
+          # Only update when really needed! Use a <year>.<month>[.<inc>] format.
+          prefix-key: "v2023.07-rust"
+      # End Enable Rust Caching
+
-      # Run cargo tests (In release mode to speed up future builds)
+      # Run cargo tests
       # First test all features together, afterwards test them separately.
       - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
         id: test_sqlite_mysql_postgresql_mimalloc
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
+          cargo test --features sqlite,mysql,postgresql,enable_mimalloc

       - name: "test features: sqlite,mysql,postgresql"
         id: test_sqlite_mysql_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql
+          cargo test --features sqlite,mysql,postgresql

       - name: "test features: sqlite"
         id: test_sqlite
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite
+          cargo test --features sqlite

       - name: "test features: mysql"
         id: test_mysql
         if: $${{ always() }}
         run: |
-          cargo test --release --features mysql
+          cargo test --features mysql

       - name: "test features: postgresql"
         id: test_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features postgresql
+          cargo test --features postgresql
       # End Run cargo tests


-      # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+      # Run cargo clippy, and fail on warnings
       - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
         id: clippy
         if: ${{ always() && matrix.channel == 'rust-toolchain' }}
         run: |
-          cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+          cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
       # End Run cargo clippy
@@ -187,21 +195,3 @@ jobs:
         run: |
           echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
-
-
-      # Build the binary to upload to the artifacts
-      - name: "build features: sqlite,mysql,postgresql"
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        run: |
-          cargo build --release --features sqlite,mysql,postgresql
-      # End Build the binary
-
-
-      # Upload artifact to Github Actions
-      - name: "Upload artifact"
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          name: vaultwarden
-          path: target/release/vaultwarden
-      # End Upload artifact to Github Actions
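The switch from `cat rust-toolchain` to a grep over rust-toolchain.toml is needed because the TOML file wraps the channel in a table instead of being a bare version string. A quick sketch of what the workflow's extraction does (the sample file contents are an assumption):

    # Hypothetical rust-toolchain.toml:
    #   [toolchain]
    #   channel = "1.73.0"
    # The workflow's grep (verbatim from the step above) pulls out just the version:
    grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml
    # prints: 1.73.0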
.github/workflows/hadolint.yml (5 lines changed)

@@ -13,10 +13,9 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
       # End Checkout the repo
-

       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
@@ -30,5 +29,5 @@ jobs:
       # Test Dockerfiles
       - name: Run hadolint
         shell: bash
-        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
+        run: hadolint docker/Dockerfile.{debian,alpine}
       # End Test Dockerfiles
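The new run line relies on bash brace expansion, so `hadolint docker/Dockerfile.{debian,alpine}` lints exactly the two generated Dockerfiles instead of collecting them via git ls-files. A local equivalent might look like this sketch (the pinned version and download URL are assumptions, not from this diff):

    HADOLINT_VERSION=2.12.0
    curl -fsSL -o /usr/local/bin/hadolint \
      "https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-Linux-x86_64"
    chmod +x /usr/local/bin/hadolint
    hadolint docker/Dockerfile.{debian,alpine}   # expands to both generated files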
.github/workflows/release.yml (215 lines changed)

@@ -6,15 +6,15 @@ on:
       - ".github/workflows/release.yml"
       - "src/**"
       - "migrations/**"
-      - "hooks/**"
       - "docker/**"
       - "Cargo.*"
       - "build.rs"
       - "diesel.toml"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"

     branches: # Only on paths above
       - main
+      - release-build-revision

     tags: # Always, regardless of paths above
       - '*'
@@ -35,23 +35,20 @@ jobs:
     with:
       cancel_others: 'true'
     # Only run this when not creating a tag
-    if: ${{ startsWith(github.ref, 'refs/heads/') }}
+    if: ${{ github.ref_type == 'branch' }}

   docker-build:
     runs-on: ubuntu-22.04
     timeout-minutes: 120
     needs: skip_check
-    # Start a local docker registry to be used to generate multi-arch images.
-    services:
-      registry:
-        image: registry:2
-        ports:
-          - 5000:5000
+    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
+    # TODO: Start a local docker registry to be used to extract the final Alpine static build images
+    # services:
+    #   registry:
+    #     image: registry:2
+    #     ports:
+    #       - 5000:5000
     env:
-      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
-      # build performance and the ability to copy extended file attributes
-      # (e.g., for executable capabilities) across build phases.
-      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
       # The *_REPO variables need to be configured as repository variables
@@ -65,7 +62,6 @@ jobs:
       # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
       # Check for Quay.io credentials in secrets
       HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
-    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
         base_image: ["debian","alpine"]
@@ -73,163 +69,102 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
         with:
           fetch-depth: 0

-      # Determine Docker Tag
-      - name: Init Variables
-        id: vars
-        shell: bash
-        run: |
-          # Check which main tag we are going to build determined by github.ref
-          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
-            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
-          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
-            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
-          fi
-      # End Determine Docker Tag
+      - name: Initialize QEMU binfmt support
+        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
+        with:
+          platforms: "arm64,arm"
+
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+        with:
+          config-inline: |
+            [worker.oci]
+              max-parallelism = 2
+          driver-opts: |
+            network=host
+
+      # Determine Base Tags and Source Version
+      - name: Determine Base Tags and Source Version
+        shell: bash
+        run: |
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
+          fi
+
+          # Get the Source Version for this release
+          GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
+          if [[ -n "${GIT_EXACT_TAG}" ]]; then
+            echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}"
+          else
+            GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+            echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}"
+          fi
+      # End Determine Base Tags

       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}

+      - name: Add registry for DockerHub
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}

+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to Quay.io
       - name: Login to Quay.io
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: quay.io
           username: ${{ secrets.QUAY_USERNAME }}
           password: ${{ secrets.QUAY_TOKEN }}
         if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}

-      # Debian
-
-      # Docker Hub
-      - name: Build Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      # Alpine
-
-      # Docker Hub
-      - name: Build Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+      - name: Add registry for Quay.io
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
+
+      - name: Bake ${{ matrix.base_image }} containers
+        uses: docker/bake-action@511fde2517761e303af548ec9e0ea74a8a100112 # v4.0.0
+        env:
+          BASE_TAGS: "${{ env.BASE_TAGS }}"
+          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
+          SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
+          SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
+          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
+        with:
+          pull: true
+          push: true
+          files: docker/docker-bake.hcl
+          targets: "${{ matrix.base_image }}-multi"
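The new "Determine Base Tags and Source Version" step replaces the twelve per-registry hooks steps with environment values that the single bake step consumes. Its version logic can be reproduced locally with the same commands (a sketch to run inside a clone of the repo):

    SOURCE_COMMIT="$(git rev-parse HEAD)"
    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
    if [[ -n "${GIT_EXACT_TAG}" ]]; then
      echo "SOURCE_VERSION=${GIT_EXACT_TAG}"                      # HEAD is exactly on a tag
    else
      GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
      echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"  # last tag plus short commit hash
    fi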
.github/workflows/trivy.yml (new file, 43 lines)

name: trivy

on:
  push:
    branches:
      - main
      - release-build-revision
    tags:
      - '*'
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '00 12 * * *'

permissions:
  contents: read

jobs:
  trivy-scan:
    name: Check
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    permissions:
      contents: read
      security-events: write
      actions: read
    steps:
      - name: Checkout code
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@f78e9ecf42a1271402d4f484518b9313235990e1 # v0.13.1
        with:
          scan-type: repo
          ignore-unfixed: true
          format: sarif
          output: trivy-results.sarif
          severity: CRITICAL,HIGH

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@bad341350a2f5616f9e048e51360cedc49181ce8 # v2.22.4
        with:
          sarif_file: 'trivy-results.sarif'
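A roughly equivalent local invocation of the scan, assuming the trivy CLI is installed (the flag names mirror the action inputs above, but this mapping is an assumption, not part of the workflow):

    trivy repository --ignore-unfixed --severity CRITICAL,HIGH \
      --format sarif --output trivy-results.sarif .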
@@ -1,10 +1,12 @@
 ignored:
+  # To prevent issues and make clear some images only work on linux/amd64, we ignore this
+  - DL3029
   # disable explicit version for apt install
   - DL3008
   # disable explicit version for apk install
   - DL3018
-  # disable check for consecutive `RUN` instructions
-  - DL3059
+  # Ignore shellcheck info message
+  - SC1091
 trustedRegistries:
   - docker.io
   - ghcr.io
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.5.0
     hooks:
       - id: check-yaml
       - id: check-json
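Rev bumps like this are usually produced and verified with the pre-commit CLI itself; a sketch, assuming pre-commit is installed:

    pre-commit autoupdate      # rewrites rev: v4.4.0 -> v4.5.0 in .pre-commit-config.yaml
    pre-commit run --all-files # re-runs check-yaml and check-json across the repo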
Cargo.lock (generated, 1267 lines changed; diff suppressed because it is too large)
Cargo.toml (88 lines changed)

@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.69.0"
+rust-version = "1.71.1"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -40,9 +40,9 @@ syslog = "6.1.0"

 [dependencies]
 # Logging
-log = "0.4.19"
-fern = { version = "0.6.2", features = ["syslog-6"] }
-tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
+log = "0.4.20"
+fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
+tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

 # A `dotenv` implementation for Rust
 dotenvy = { version = "0.15.7", default-features = false }
@@ -51,48 +51,47 @@ dotenvy = { version = "0.15.7", default-features = false }
 once_cell = "1.18.0"

 # Numerical libraries
-num-traits = "0.2.16"
-num-derive = "0.4.0"
+num-traits = "0.2.17"
+num-derive = "0.4.1"

 # Web framework
-rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
-# rocket_ws = { version ="0.1.0-rc.3" }
-rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch
+rocket = { version = "0.5.0-rc.4", features = ["tls", "json"], default-features = false }
+rocket_ws = { version ="0.1.0-rc.4" }

 # WebSockets libraries
-tokio-tungstenite = "0.19.0"
+tokio-tungstenite = "0.20.1"
 rmpv = "1.0.1" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.5.0"
+dashmap = "5.5.3"

 # Async futures
-futures = "0.3.28"
-tokio = { version = "1.30.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+futures = "0.3.29"
+tokio = { version = "1.34.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.183", features = ["derive"] }
-serde_json = "1.0.104"
+serde = { version = "1.0.192", features = ["derive"] }
+serde_json = "1.0.108"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
+diesel = { version = "2.1.4", features = ["chrono", "r2d2"] }
 diesel_migrations = "2.1.0"
 diesel_logger = { version = "0.3.0", optional = true }

 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
-ring = "0.16.20"
+ring = "0.17.5"

 # UUID generation
-uuid = { version = "1.4.1", features = ["v4"] }
+uuid = { version = "1.5.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.3"
-time = "0.3.25"
+chrono = { version = "0.4.31", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.8.4"
+time = "0.3.30"

 # Job scheduler
 job_scheduler_ng = "2.0.4"
@@ -101,10 +100,10 @@ job_scheduler_ng = "2.0.4"
 data-encoding = "2.4.0"

 # JWT library
-jsonwebtoken = "8.3.0"
+jsonwebtoken = "9.1.0"

 # TOTP library
-totp-lite = "2.0.0"
+totp-lite = "2.0.1"

 # Yubico Library
 yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
@@ -113,34 +112,37 @@
 webauthn-rs = "0.3.2"

 # Handling of URL's for WebAuthn and favicons
-url = "2.4.0"
+url = "2.4.1"

 # Email libraries
-lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.11.1", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
 percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
 email_address = "0.2.4"

 # HTML Template library
-handlebars = { version = "4.3.7", features = ["dir_source"] }
+handlebars = { version = "4.5.0", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.18", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
+reqwest = { version = "0.11.22", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }

 # Favicon extraction libraries
 html5gum = "0.5.7"
-regex = { version = "1.9.3", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.10.2", features = ["std", "perf", "unicode-perl"], default-features = false }
 data-url = "0.3.0"
-bytes = "1.4.0"
+bytes = "1.5.0"

 # Cache function results (Used for version check and favicon fetching)
-cached = "0.44.0"
+cached = { version = "0.46.1", features = ["async"] }

 # Used for custom short lived cookie jar during favicon extraction
 cookie = "0.16.2"
 cookie_store = "0.19.1"

 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.56"
+openssl = "=0.10.57"
+# Set openssl-sys fixed to v0.9.92 to prevent building issues with musl, arm and 32bit pointer width
+# It will force add a dynamically linked library which prevents the build from being static
+openssl-sys = "=0.9.92"

 # CLI argument parsing
 pico-args = "0.5.0"
@@ -150,22 +152,19 @@ paste = "1.0.14"
 governor = "0.6.0"

 # Check client versions for specific features.
-semver = "1.0.18"
+semver = "1.0.20"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
-which = "4.4.0"
+mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
+which = "5.0.0"

 # Argon2 library with support for the PHC format
-argon2 = "0.5.1"
+argon2 = "0.5.2"

 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.2.0"
+rpassword = "7.3.1"

-[patch.crates-io]
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
-# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch

 # Strip debuginfo from the release builds
 # Also enable thin LTO for some optimizations
@@ -173,11 +172,12 @@
 strip = "debuginfo"
 lto = "thin"

-# Always build argon2 using opt-level 3
-# This is a huge speed improvement during testing
-[profile.dev.package.argon2]
-opt-level = 3
-
 # A little bit of a speedup
 [profile.dev]
 split-debuginfo = "unpacked"
+
+# Always build argon2 using opt-level 3
+# This is a huge speed improvement during testing
+[profile.dev.package.argon2]
+opt-level = 3
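The rust-version bump to 1.71.1 is exactly what the build workflow's msrv channel reads back out of Cargo.toml. A sketch for verifying the crate still builds at that floor (the toolchain commands assume rustup is available):

    grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml   # same grep as build.yml; prints 1.71.1
    rustup toolchain install 1.71.1
    cargo +1.71.1 check --features sqlite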
@@ -1 +1 @@
-docker/amd64/Dockerfile
+docker/Dockerfile.debian
@@ -42,7 +42,7 @@ docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p
 ```
 This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.

-**IMPORTANT**: Most modern web browsers, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.

 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
|
2
build.rs
2
build.rs
@@ -18,7 +18,7 @@ fn main() {
|
|||||||
);
|
);
|
||||||
|
|
||||||
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
||||||
compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
|
compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");
|
||||||
|
|
||||||
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
||||||
// If neither exist, read from git.
|
// If neither exist, read from git.
|
||||||
|
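The corrected compile_error! guard can be exercised directly; building a release binary with the query_logger feature enabled should fail with exactly that message (a sketch):

    cargo build --release --features sqlite,query_logger
    # error: Query Logging is only allowed during development, it is not intended for production usage!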
docker/DockerSettings.yaml (new file, 28 lines)

---
vault_version: "v2023.10.0"
vault_image_digest: "sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935"
# Cross Compile Docker Helper Scripts v1.3.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
rust_version: 1.73.0 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: 3.18 # Alpine version to be used
# For which platforms/architectures will we try to build images
platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
# Determine the build images per OS/Arch
build_stage_image:
  debian:
    image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}"
    platform: "$BUILDPLATFORM"
  alpine:
    image: "build_${TARGETARCH}${TARGETVARIANT}"
    platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
    arch_image:
      amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
      arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"
      armv7: "ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}"
      armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}"
# The final image which will be used to distribute the container images
runtime_stage_image:
  debian: "docker.io/library/debian:{{debian_version}}-slim"
  alpine: "docker.io/library/alpine:{{alpine_version}}"
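Per the header comments in the generated Dockerfiles, edits go into this file or Dockerfile.j2 and the outputs are regenerated with make. A sketch of that loop (the Makefile location and default target are assumptions, not shown in this diff):

    cd docker
    make                 # renders Dockerfile.j2 into Dockerfile.debian and Dockerfile.alpine
    git diff --stat Dockerfile.debian Dockerfile.alpine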
docker/Dockerfile.alpine (new file, 160 lines)

# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
#     [docker.io/vaultwarden/web-vault:v2023.10.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault

########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.73.0 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.73.0 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.73.0 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.73.0 as build_armv6

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root" \
    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
    # Debian Bookworm already contains libpq v15
    PQ_LIB_DIR="/usr/local/musl/pq15/lib"


# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
    # Output the current contents of the file
    cat /env-cargo

# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

RUN source /env-cargo && \
    rustup target add "${CARGO_TARGET}"

ARG CARGO_PROFILE=release
ARG VW_VERSION

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN source /env-cargo && \
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Builds again, this time it will be the actual source files being build
RUN source /env-cargo && \
    # Make sure that we actually build the project by updating the src/main.rs timestamp
    touch src/main.rs && \
    # Create a symlink to the binary target folder to easy copy the binary in the final stage
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
    else \
        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
    fi


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
#
# To build these images you need to have qemu binfmt support.
# See the following pages to help install these tools locally
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
#
# Or use a Docker image which modifies your host system to support this.
# The GitHub Actions Workflow uses the same image as used below.
# See: https://github.com/tonistiigi/binfmt
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.18

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data && \
    apk --no-cache add \
        ca-certificates \
        curl \
        openssl \
        tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
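Following the runtime-stage comments above, a local cross-build of this file for arm64 could look like the following sketch (the image tag is illustrative; CI instead drives docker/docker-bake.hcl via the release workflow):

    # One-time qemu binfmt setup, verbatim from the comments above:
    docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
    docker buildx build \
      --platform linux/arm64 \
      --file docker/Dockerfile.alpine \
      --tag vaultwarden:local-alpine-arm64 .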
@@ -1,34 +0,0 @@ (file removed)

# syntax=docker/dockerfile:1

# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
#   $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
#   amd64
#
# Recent versions of Docker have started printing a warning when the image's
# claimed arch doesn't match the host arch. For example:
#
#   WARNING: The requested image's platform (linux/amd64) does not match the
#   detected host platform (linux/arm/v7) and no specific platform was requested
#
# The image still works fine, but the spurious warning creates confusion.
#
# Docker doesn't seem to provide a way to directly set the arch of an image
# at build time. To resolve the build vs. target arch discrepancy, we use
# Docker Buildx to build a new set of images with the correct target arch.
#
# Docker Buildx uses this Dockerfile to build an image for each requested
# platform. Since the Dockerfile basically consists of a single `FROM`
# instruction, we're effectively telling Buildx to build a platform-specific
# image by simply copying the existing cross-built image and setting the
# correct target arch as a side effect.
#
# References:
#
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
#
ARG LOCAL_REPO
ARG DOCKER_TAG
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
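The mismatch this removed file worked around is easy to observe; the first command is taken from its own comments, while the second shows the per-platform manifests the new buildx/bake flow produces instead (a sketch):

    docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'   # old images reported: amd64
    docker buildx imagetools inspect vaultwarden/server:latest                   # lists real per-arch manifests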
194
docker/Dockerfile.debian
Normal file
194
docker/Dockerfile.debian
Normal file
@@ -0,0 +1,194 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
# This will generate two Dockerfiles: `Dockerfile.debian` and `Dockerfile.alpine`

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
#     [docker.io/vaultwarden/web-vault:v2023.10.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault

########################## Cross Compile Docker Helper Scripts ##########################
## We use linux/amd64 no matter which build platform, since these are all bash scripts
## And these bash scripts do not differ significantly, if at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.73.0-slim-bookworm as build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Install clang to get `xx-cargo` working
# Install pkg-config to allow amd64 builds to find all libraries
# Install git so build.rs can determine the correct version
# Install the libc cross packages based upon the debian-arch
RUN apt-get update && \
    apt-get install -y \
        --no-install-recommends \
        clang \
        pkg-config \
        git \
        "libc6-$(xx-info debian-arch)-cross" \
        "libc6-dev-$(xx-info debian-arch)-cross" \
        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo

RUN xx-apt-get install -y \
        --no-install-recommends \
        gcc \
        libmariadb3 \
        libpq-dev \
        libpq5 \
        libssl-dev && \
    # Force install arch-dependent MariaDB dev packages
    # Installing them the normal way breaks several other packages (again)
    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
    dpkg --force-all -i ./libmariadb-dev*.deb

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
    if xx-info is-cross ; then \
        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
        # Because of this we generate the needed environment variables here which we can load in the needed steps.
        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
        echo "export CROSS_COMPILE=1" >> /env-cargo && \
        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
    fi && \
    # Output the current contents of the file
    cat /env-cargo
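# Illustrative note (an assumption for clarity, not part of the generated file):
# for a linux/arm64 cross build, the /env-cargo file printed above would contain
# roughly the following, derived from the echo lines and xx-info output:
#   export CARGO_TARGET=aarch64-unknown-linux-gnu
#   export CC_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-gcc
#   export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
#   export PKG_CONFIG=/usr/bin/aarch64-linux-gnu-pkg-config
#   export CROSS_COMPILE=1
#   export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu
#   export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu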

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

RUN source /env-cargo && \
    rustup target add "${CARGO_TARGET}"

ARG CARGO_PROFILE=release
ARG VW_VERSION

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN source /env-cargo && \
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
    # Make sure that we actually build the project by updating the src/main.rs timestamp
    touch src/main.rs && \
    # Create a symlink to the binary target folder to easily copy the binary in the final stage
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
    else \
        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
    fi


######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
#
# To build these images you need to have qemu binfmt support.
# See the following pages to help install these tools locally
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
#
# Or use a Docker image which modifies your host system to support this.
# The GitHub Actions Workflow uses the same image as used below.
# See: https://github.com/tonistiigi/binfmt
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    DEBIAN_FRONTEND=noninteractive

# Create data folder and Install needed libraries
RUN mkdir /data && \
    apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
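For a quick local sanity check, this new Dockerfile can also be built directly for a single platform with `docker buildx build`; a minimal sketch, assuming QEMU binfmt support is installed and the command is run from the repo root (the tag name is illustrative):

```bash
# Build the Debian-based image for linux/arm64 and load it into the local Docker daemon
docker buildx build \
  --file docker/Dockerfile.debian \
  --platform linux/arm64 \
  --tag vaultwarden/server:local-arm64 \
  --load .
```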
@@ -1,68 +1,14 @@
 # syntax=docker/dockerfile:1
 
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
-{% set rust_version = "1.71.1" %}
-{% set debian_version = "bookworm" %}
-{% set alpine_version = "3.17" %}
-{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
-{% if "alpine" in target_file %}
-{% if "amd64" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s-openssl3" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
-{% set package_arch_target = "x86_64-unknown-linux-musl" %}
-{% elif "armv7" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s-openssl3" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
-{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
-{% elif "armv6" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s-openssl3" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
-{% set package_arch_target = "arm-unknown-linux-musleabi" %}
-{% elif "arm64" in target_file %}
-{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s-openssl3" % rust_version %}
-{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
-{% set package_arch_target = "aarch64-unknown-linux-musl" %}
-{% endif %}
-{% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
-{% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
-{% set package_arch_name = "arm64" %}
-{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
-{% set package_cross_compiler = "aarch64-linux-gnu" %}
-{% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
-{% set package_arch_name = "armel" %}
-{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
-{% set package_cross_compiler = "arm-linux-gnueabi" %}
-{% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
-{% set package_arch_name = "armhf" %}
-{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
-{% set package_cross_compiler = "arm-linux-gnueabihf" %}
-{% endif %}
-{% if package_arch_name is defined %}
-{% set package_arch_prefix = ":" + package_arch_name %}
-{% else %}
-{% set package_arch_prefix = "" %}
-{% endif %}
-{% if package_arch_target is defined %}
-{% set package_arch_target_param = " --target=" + package_arch_target %}
-{% else %}
-{% set package_arch_target_param = "" %}
-{% endif %}
-{% if "buildkit" in target_file %}
-{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
-{% else %}
-{% set mount_rust_cache = "" %}
-{% endif %}
+# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
+# This will generate two Dockerfiles: `Dockerfile.debian` and `Dockerfile.alpine`
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "v2023.7.1" %}
-{% set vault_image_digest = "sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f" %}
+####################### VAULT BUILD IMAGE #######################
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -80,10 +26,33 @@
 #     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
 #     [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
 
-########################## BUILD IMAGE ##########################
-FROM {{ build_stage_base_image }} as build
+{% if base == "debian" %}
+########################## Cross Compile Docker Helper Scripts ##########################
+## We use linux/amd64 no matter which build platform, since these are all bash scripts
+## And these bash scripts do not differ significantly, if at all
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
+{% elif base == "alpine" %}
+########################## ALPINE BUILD IMAGES ##########################
+## NOTE: The Alpine base images do not support other platforms than linux/amd64
+## And for Alpine we define all build images here, they will only be loaded when actually used
+{% for arch in build_stage_image[base].arch_image %}
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
+{% endfor %}
+{% endif %}
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
+{% if base == "debian" %}
+COPY --from=xx / /
+{% endif %}
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -91,132 +60,162 @@ ENV DEBIAN_FRONTEND=noninteractive \
     TZ=UTC \
     TERM=xterm-256color \
     CARGO_HOME="/root/.cargo" \
-    REGISTRIES_CRATES_IO_PROTOCOL=sparse \
     USER="root"
+{%- if base == "alpine" %} \
+    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
+    # Debian Bookworm already contains libpq v15
+    PQ_LIB_DIR="/usr/local/musl/pq15/lib"
+{% endif %}
+
+{% if base == "debian" %}
+
+# Install clang to get `xx-cargo` working
+# Install pkg-config to allow amd64 builds to find all libraries
+# Install git so build.rs can determine the correct version
+# Install the libc cross packages based upon the debian-arch
+RUN apt-get update && \
+    apt-get install -y \
+        --no-install-recommends \
+        clang \
+        pkg-config \
+        git \
+        "libc6-$(xx-info debian-arch)-cross" \
+        "libc6-dev-$(xx-info debian-arch)-cross" \
+        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
+
+RUN xx-apt-get install -y \
+        --no-install-recommends \
+        gcc \
+        libmariadb3 \
+        libpq-dev \
+        libpq5 \
+        libssl-dev && \
+    # Force install arch-dependent MariaDB dev packages
+    # Installing them the normal way breaks several other packages (again)
+    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
+    dpkg --force-all -i ./libmariadb-dev*.deb
+{% endif %}
+
 # Create CARGO_HOME folder and don't download rust docs
-RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
+RUN mkdir -pv "${CARGO_HOME}" \
     && rustup set profile minimal
 
-{% if "alpine" in target_file %}
-# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
-# Debian Bookworm already contains libpq v15
-ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"
-{% if "armv6" in target_file %}
-# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
-ENV RUSTFLAGS='-Clink-arg=-latomic'
-{% endif %}
-{% elif "arm" in target_file %}
-# Install build dependencies for the {{ package_arch_name }} architecture
-RUN {{ mount_rust_cache -}} dpkg --add-architecture {{ package_arch_name }} \
-    && apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-{{ package_cross_compiler }} \
-        libc6-dev{{ package_arch_prefix }} \
-        libmariadb-dev{{ package_arch_prefix }} \
-        libmariadb-dev-compat{{ package_arch_prefix }} \
-        libmariadb3{{ package_arch_prefix }} \
-        libpq-dev{{ package_arch_prefix }} \
-        libpq5{{ package_arch_prefix }} \
-        libssl-dev{{ package_arch_prefix }} \
-    #
-    # Make sure cargo has the right target config
-    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
-    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
-    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"
-
-# Set arm specific environment values
-ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
-    CROSS_COMPILE="1" \
-    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
-    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
-{% elif "amd64" in target_file %}
-# Install build dependencies
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libmariadb-dev \
-        libpq-dev
-{% endif %}
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
 
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-{% if package_arch_target is defined %}
-RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
-{% endif %}
+{% if base == "debian" %}
+# Environment variables for cargo across Debian and Alpine
+RUN source /env-cargo && \
+    if xx-info is-cross ; then \
+        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
+        # Because of this we generate the needed environment variables here which we can load in the needed steps.
+        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
+        echo "export CROSS_COMPILE=1" >> /env-cargo && \
+        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
+        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    fi && \
+    # Output the current contents of the file
+    cat /env-cargo
 
 # Configure the DB ARG as late as possible to not invalidate the cached layers above
-{% if "alpine" in target_file %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Shared variables across Debian and Alpine
+RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
+    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
+    # Output the current contents of the file
+    cat /env-cargo
 
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-{% else %}
-ARG DB=sqlite,mysql,postgresql
 {% endif %}
+
+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+ARG CARGO_PROFILE=release
+ARG VW_VERSION
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain.toml ./rust-toolchain.toml
+COPY ./build.rs ./build.rs
 
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
-    && find . -not -path "./target*" -delete
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete
 
 # Copies the complete project
 # To avoid copying unneeded files, use .dockerignore
 COPY . .
 
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
+# Builds again, this time it will be the actual source files being built
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    touch src/main.rs && \
+    # Create a symlink to the binary target folder to easily copy the binary in the final stage
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM {{ runtime_stage_base_image }}
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}
 
 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
     ROCKET_PORT=80
-{%- if "alpine" in runtime_stage_base_image %} \
+{%- if base == "debian" %} \
+    DEBIAN_FRONTEND=noninteractive
+{% elif base == "alpine" %} \
     SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}
 
-{% if "amd64" not in target_file %}
-RUN [ "cross-build-start" ]
-{% endif %}
-
 # Create data folder and Install needed libraries
-RUN mkdir /data \
-{% if "alpine" in runtime_stage_base_image %}
-    && apk add --no-cache \
+RUN mkdir /data && \
+{% if base == "debian" %}
+    apt-get update && apt-get install -y \
+        --no-install-recommends \
+        ca-certificates \
+        curl \
+        libmariadb-dev-compat \
+        libpq5 \
+        openssl && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+{% elif base == "alpine" %}
+    apk --no-cache add \
         ca-certificates \
         curl \
         openssl \
         tzdata
-{% else %}
-    && apt-get update && apt-get install -y \
-        --no-install-recommends \
-        ca-certificates \
-        curl \
-        libmariadb-dev-compat \
-        libpq5 \
-        openssl \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-{% endif %}
-
-{% if "amd64" not in target_file %}
-RUN [ "cross-build-end" ]
 {% endif %}
 
 VOLUME /data
@@ -226,16 +225,13 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY --from=vault /web-vault ./web-vault
-{% if package_arch_target is defined %}
-COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
-{% else %}
-COPY --from=build /app/target/release/vaultwarden .
-{% endif %}
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
 
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .
+
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 CMD ["/start.sh"]
@@ -1,15 +1,4 @@
-OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
-
-all: $(OBJECTS)
-
-%/Dockerfile: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.buildkit: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
-
-%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
-	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
+all:
+	./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
+	./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine
+
+.PHONY: all
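With this simplified Makefile, regenerating both Dockerfiles after editing `DockerSettings.yaml` or `Dockerfile.j2` is a single step; a minimal sketch, assuming the Makefile and the `render_template` script live in the `docker/` directory:

```bash
cd docker
make
# Review what the template rendered before committing
git diff -- Dockerfile.debian Dockerfile.alpine
```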
184 docker/README.md
@@ -1,3 +1,183 @@
-The arch-specific directory names follow the arch identifiers used by the Docker official images:
-https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64

# Vaultwarden Container Building

To build and release new testing and stable releases of Vaultwarden we use `docker buildx bake`.<br>
This can be used locally by running the command yourself, but it is also used by GitHub Actions.

This makes it easier for us to test and maintain the different architectures we provide.<br>
We also have just two Dockerfiles, one for Debian and one for Alpine based images.<br>
With just these two files we can build both Debian and Alpine images for the following platforms:
- amd64 (linux/amd64)
- arm64 (linux/arm64)
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)

To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different from your host.<br>
This ensures the container build process can run binaries from other architectures.<br>

**NOTE**: Run all the examples below from the root of the repo.<br>


## How to install QEMU binfmt support

This is different per host OS, but most support this in some way.<br>

### Ubuntu/Debian
```bash
apt install binfmt-support qemu-user-static
```

### Arch Linux (and others based upon it)
```bash
pacman -S qemu-user-static qemu-user-static-binfmt
```

### Fedora
```bash
dnf install qemu-user-static
```

### Others
There is also an option to use another Docker container to provide support for this.
```bash
# To install and activate
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
```
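Whichever method you use, it is worth checking that the handlers were actually registered before starting a build; a small sketch (the exact handler names can vary per distribution):

```bash
# Expect entries such as qemu-aarch64 and qemu-arm when binfmt support is active
ls /proc/sys/fs/binfmt_misc/ | grep -i qemu
```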

## Single architecture container building

You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>

```bash
# Default bake triggers a Debian build using the host's architecture
docker buildx bake --file docker/docker-bake.hcl

# Bake Debian ARM64 using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl debian-arm64

# Bake Alpine ARMv6 as a release build
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl alpine-armv6
```
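To confirm that a bake target really produced an image for the requested architecture, inspecting the resulting image is enough; a minimal sketch, using the `testing-arm64` tag that the arm64 targets produce (see the tagging notes below):

```bash
# Prints "arm64" if the debian-arm64 bake produced the expected image
docker image inspect --format '{{.Architecture}}' vaultwarden/server:testing-arm64
```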

## Local Multi Architecture container building

Start the initialization, this only needs to be done once.

```bash
# Create and use a new buildx builder instance which connects to the host network
docker buildx create --name vaultwarden --use --driver-opt network=host

# Validate it runs
docker buildx inspect --bootstrap

# Create a local container registry directly reachable on the localhost
docker run -d --name registry --network host registry:2
```

After that is done, you should be able to build and push to the local registry.<br>
Use the following command with the modified variables to bake the Alpine images.<br>
Replace `alpine` with `debian` if you want to build the Debian multi arch images.

```bash
# Start a buildx bake using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
docker buildx bake --file docker/docker-bake.hcl alpine-multi
```
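After the multi arch bake has pushed to the local registry, you can verify that the manifest list really contains every platform; a minimal sketch, assuming the default `testing` tag from the variables table below:

```bash
# Shows the per-platform manifests, e.g. linux/amd64, linux/arm64, linux/arm/v7 and linux/arm/v6
docker buildx imagetools inspect localhost:5000/vaultwarden/server:testing
```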

## Using the `bake.sh` script

To make it a bit easier to trigger a build, there is also a `bake.sh` script.<br>
This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables.<br>
This script can be called from either the repo root or within the docker directory.

So, if you want to build a Multi Arch Alpine container pushing to your localhost registry you can run this from within the docker directory. (Just make sure you executed the initialization steps above first)
```bash
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
./bake.sh alpine-multi
```

Or if you want to just build a Debian container from the repo root, you can run this.
```bash
docker/bake.sh
```

You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
This will also append those values to the tag so you can see the built container when running `docker images`.

You can also append extra arguments after the target if you want. This can be useful for example to print what bake will use.
```bash
docker/bake.sh alpine-all --print
```

### Testing baked images

To test these images you can run them by using the correct tag and providing the platform.<br>
For example, after you have built an arm64 image via `./bake.sh debian-arm64` you can run:
```bash
docker run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  vaultwarden/server:testing-arm64
```


## Using the `podman-bake.sh` script

To also make building easier using podman, there is a `podman-bake.sh` script.<br>
This script calls `podman buildx build` with the needed parameters and, the same as `bake.sh`, it will generate some variables automatically.<br>
This script can be called from either the repo root or within the docker directory.

**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES`, and a single `BASE_TAGS` value, no comma separated values. It also only supports building separate architectures, no Multi Arch containers.

To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
```bash
DB="sqlite,enable_mimalloc" \
./podman-bake.sh alpine-arm64
```

Or if you want to just build a Debian container from the repo root, you can run this.
```bash
docker/podman-bake.sh
```

You can append extra arguments after the target if you want. This can be useful for example to disable cache like this.
```bash
./podman-bake.sh alpine-arm64 --no-cache
```

For the podman builds you can, just like the `bake.sh` script, also append the architecture to build for that specific platform.<br>

### Testing podman built images

The command to start a podman built container is almost the same as for the docker/bake built containers. The images start with `localhost/`, so you need to prepend that.

```bash
podman run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  localhost/vaultwarden/server:testing-arm64
```


## Variables supported

| Variable              | default            | description |
| --------------------- | ------------------ | ----------- |
| CARGO_PROFILE         | null               | Which cargo profile to use. `null` means what is defined in the Dockerfile |
| DB                    | null               | Which `features` to build. `null` means what is defined in the Dockerfile |
| SOURCE_REPOSITORY_URL | null               | The source repository from which this build is triggered |
| SOURCE_COMMIT         | null               | The commit hash of the current commit for this build |
| SOURCE_VERSION        | null               | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
| BASE_TAGS             | testing            | Tags to be used. Can be a comma separated value like "latest,1.29.2" |
| CONTAINER_REGISTRIES  | vaultwarden/server | Comma separated value of container registries. Like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
| VW_VERSION            | null               | To override the `SOURCE_VERSION` value. This is also used by the `build.rs` code for example |
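As a worked example of combining these variables, a stable-release style bake could look roughly like this; a sketch only, with illustrative tag and registry values taken from the defaults and examples above:

```bash
# Bake multi arch Debian images and tag them for two registries at once
BASE_TAGS="latest,1.29.2" \
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker/bake.sh debian-multi
```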
@@ -1,119 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs


# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/debian:bookworm-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,119 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs


# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/debian:bookworm-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.7.1]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.71.1-openssl3 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
REGISTRIES_CRATES_IO_PROTOCOL=sparse \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs
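# cargo decides what to rebuild from file modification times; the dummy
# src/main.rs compiled in the dependency step can be newer than the files
# copied from the build context, so without this touch the final binary
# might not be rebuilt from the real sources.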

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,140 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
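# The echo lines above append a block like this to ${CARGO_HOME}/config
# (a sketch of the generated file):
#   [target.aarch64-unknown-linux-gnu]
#   linker = "aarch64-linux-gnu-gcc"
#   rustflags = ["-L/usr/lib/aarch64-linux-gnu"]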

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bookworm

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]
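# cross-build-start and cross-build-end are helpers shipped in balenalib
# base images: they register a QEMU shim so the arm commands between them
# can run on an amd64 build host, and remove it again afterwards.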

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,140 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bookworm

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,140 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bookworm

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,120 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
ENV RUSTFLAGS='-Clink-arg=-latomic'
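# On armv6, 64-bit atomic operations are typically lowered to calls into
# libgcc's libatomic rather than inline instructions; this flag asks the
# linker to resolve those calls so mimalloc links successfully.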

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,140 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bookworm

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,120 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
ENV RUSTFLAGS='-Clink-arg=-latomic'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,140 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bookworm

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,140 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.71.1-bookworm as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bookworm

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#     https://docs.docker.com/develop/develop-images/multistage-build/
#     https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.7.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.7.1
#     [docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f
#     [docker.io/vaultwarden/web-vault:v2023.7.1]
#
FROM docker.io/vaultwarden/web-vault@sha256:b306f38fe0d54fa3d79059a737f8e1803da44ddc5f273c2aecdd6a4886211b0f as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.71.1-openssl3 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
# Debian Bookworm already contains libpq v15
ENV PQ_LIB_DIR="/usr/local/musl/pq15/lib"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
15
docker/bake.sh
Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Be verbose on what is being executed
set -x

# Make sure we set the context to `..` so it will go up one directory
docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
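As a rough usage sketch (assuming a checkout of the repository and a working Docker Buildx; the target names are the ones defined in docker-bake.hcl below):

```bash
# Build the default group (a Debian-based image for the host platform)
./docker/bake.sh

# Build only the Alpine arm64 variant; extra flags are passed straight to `docker buildx bake`
./docker/bake.sh alpine-arm64 --print
```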
33
docker/bake_env.sh
Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# If SOURCE_COMMIT is provided via env skip this
if [ -z "${SOURCE_COMMIT+x}" ]; then
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# If VW_VERSION is provided via env use it as SOURCE_VERSION
# Else define it using git
if [[ -n "${VW_VERSION}" ]]; then
    SOURCE_VERSION="${VW_VERSION}"
else
    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
    if [[ -n "${GIT_EXACT_TAG}" ]]; then
        SOURCE_VERSION="${GIT_EXACT_TAG}"
    else
        GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
        SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
        GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
        case "${GIT_BRANCH}" in
            main|master|HEAD)
                # Do not add the branch name for these branches
                ;;
            *)
                SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
                ;;
        esac
    fi
fi

# Export the rendered variables above so bake will use them
export SOURCE_COMMIT
export SOURCE_VERSION
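To sketch how these variables feed into a build (the version value is a placeholder):

```bash
# Pin the version string instead of deriving it from git tags
VW_VERSION=1.30.0 ./docker/bake.sh debian

# Or inspect what would be exported, from the repository root
source docker/bake_env.sh && echo "${SOURCE_VERSION} (${SOURCE_COMMIT})"
```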
235
docker/docker-bake.hcl
Normal file
@@ -0,0 +1,235 @@
// ==== Baking Variables ====

// Set which cargo profile to use, dev or release for example
// Use the value provided in the Dockerfile as default
variable "CARGO_PROFILE" {
  default = null
}

// Set which DB's (features) to enable
// Use the value provided in the Dockerfile as default
variable "DB" {
  default = null
}

// The repository this build was triggered from
variable "SOURCE_REPOSITORY_URL" {
  default = null
}

// The commit hash of the current commit this build was triggered on
variable "SOURCE_COMMIT" {
  default = null
}

// The version of this build
// Typically the current exact tag of this commit,
// else the last tag and the first 8 characters of the source commit
variable "SOURCE_VERSION" {
  default = null
}

// This can be used to overwrite SOURCE_VERSION
// It will be used during the build.rs building stage
variable "VW_VERSION" {
  default = null
}

// The base tag(s) to use
// This can be a comma separated value like "testing,1.29.2"
variable "BASE_TAGS" {
  default = "testing"
}

// Which container registries should be used for the tagging
// This can be a comma separated value
// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
variable "CONTAINER_REGISTRIES" {
  default = "vaultwarden/server"
}


// ==== Baking Groups ====

group "default" {
  targets = ["debian"]
}


// ==== Shared Baking ====
function "labels" {
  params = []
  result = {
    "org.opencontainers.image.description" = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
    "org.opencontainers.image.licenses" = "AGPL-3.0-only"
    "org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
    "org.opencontainers.image.url" = "https://github.com/dani-garcia/vaultwarden"
    "org.opencontainers.image.created" = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
    "org.opencontainers.image.source" = "${SOURCE_REPOSITORY_URL}"
    "org.opencontainers.image.revision" = "${SOURCE_COMMIT}"
    "org.opencontainers.image.version" = "${SOURCE_VERSION}"
  }
}

target "_default_attributes" {
  labels = labels()
  args = {
    DB = "${DB}"
    CARGO_PROFILE = "${CARGO_PROFILE}"
    VW_VERSION = "${VW_VERSION}"
  }
}


// ==== Debian Baking ====

// Default Debian target, will build a container using the hosts platform architecture
target "debian" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.debian"
  tags = generate_tags("", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "debian-multi" {
  inherits = ["debian"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "debian-amd64" {
  inherits = ["debian"]
  platforms = ["linux/amd64"]
  tags = generate_tags("", "-amd64")
}

target "debian-arm64" {
  inherits = ["debian"]
  platforms = ["linux/arm64"]
  tags = generate_tags("", "-arm64")
}

target "debian-armv7" {
  inherits = ["debian"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("", "-armv7")
}

target "debian-armv6" {
  inherits = ["debian"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("", "-armv6")
}

// A Group to build all platforms individually for local testing
group "debian-all" {
  targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
}


// ==== Alpine Baking ====

// Default Alpine target, will build a container using the hosts platform architecture
target "alpine" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.alpine"
  tags = generate_tags("-alpine", platform_tag())
  output = ["type=docker"]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "alpine-multi" {
  inherits = ["alpine"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("-alpine", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "alpine-amd64" {
  inherits = ["alpine"]
  platforms = ["linux/amd64"]
  tags = generate_tags("-alpine", "-amd64")
}

target "alpine-arm64" {
  inherits = ["alpine"]
  platforms = ["linux/arm64"]
  tags = generate_tags("-alpine", "-arm64")
}

target "alpine-armv7" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("-alpine", "-armv7")
}

target "alpine-armv6" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("-alpine", "-armv6")
}

// A Group to build all platforms individually for local testing
group "alpine-all" {
  targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
}


// ==== Bake everything locally ====

group "all" {
  targets = ["debian-all", "alpine-all"]
}


// ==== Baking functions ====

// This will return the local platform as amd64, arm64 or armv7 for example
// It can be used for creating a local image tag
function "platform_tag" {
  params = []
  result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}"
}


function "get_container_registries" {
  params = []
  result = flatten(split(",", CONTAINER_REGISTRIES))
}

function "get_base_tags" {
  params = []
  result = flatten(split(",", BASE_TAGS))
}

function "generate_tags" {
  params = [
    suffix,   // What to append to the BASE_TAG when needed, like `-alpine` for example
    platform  // the platform we are building for if needed
  ]
  result = flatten([
    for registry in get_container_registries() :
      [for base_tag in get_base_tags() :
        concat(
          # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
          base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
          # The default tagging strategy
          ["${registry}:${base_tag}${suffix}${platform}"]
        )
      ]
  ])
}

function "image_index_annotations" {
  params = []
  result = flatten([
    for key, value in labels() :
      value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : []
  ])
}
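Since bake variables can be overridden from the environment, a multi-registry push could look like this (registry and tag values are examples only):

```bash
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
BASE_TAGS="testing,1.30.0" \
./docker/bake.sh debian-multi
```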
@@ -10,7 +10,7 @@ CONFIG_FILE="${DATA_FOLDER}"/config.json
 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {
-    local key="$1"
+    key="$1"
     # Extract a line of the form:
     #     "domain": "https://bw.example.com/path",
     grep "\"${key}\":" "${CONFIG_FILE}" |
105
docker/podman-bake.sh
Executable file
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Check if a target is given as first argument
# If not we assume the defaults and pass the given arguments to the podman command
case "${1}" in
    alpine*|debian*)
        TARGET="${1}"
        # Now shift the $@ array so we only have the rest of the arguments
        # This allows us to append these as extra arguments to the podman buildx build command
        shift
        ;;
esac

LABEL_ARGS=(
    --label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
    --label org.opencontainers.image.licenses="AGPL-3.0-only"
    --label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    --label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    --label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
)
if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
fi
if [[ -n "${SOURCE_COMMIT}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
fi
if [[ -n "${SOURCE_VERSION}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
fi

# Check if and which --build-arg arguments we need to configure
BUILD_ARGS=()
if [[ -n "${DB}" ]]; then
    BUILD_ARGS+=(--build-arg DB="${DB}")
fi
if [[ -n "${CARGO_PROFILE}" ]]; then
    BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
fi
if [[ -n "${VW_VERSION}" ]]; then
    BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
fi

# Set the default BASE_TAGS if none are provided
if [[ -z "${BASE_TAGS}" ]]; then
    BASE_TAGS="testing"
fi

# Set the default CONTAINER_REGISTRIES if none are provided
if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
    CONTAINER_REGISTRIES="vaultwarden/server"
fi

# Check which Dockerfile we need to use, default is debian
case "${TARGET}" in
    alpine*)
        BASE_TAGS="${BASE_TAGS}-alpine"
        DOCKERFILE="Dockerfile.alpine"
        ;;
    *)
        DOCKERFILE="Dockerfile.debian"
        ;;
esac

# Check which platform we need to build and append the BASE_TAGS with the architecture
case "${TARGET}" in
    *-arm64)
        BASE_TAGS="${BASE_TAGS}-arm64"
        PLATFORM="linux/arm64"
        ;;
    *-armv7)
        BASE_TAGS="${BASE_TAGS}-armv7"
        PLATFORM="linux/arm/v7"
        ;;
    *-armv6)
        BASE_TAGS="${BASE_TAGS}-armv6"
        PLATFORM="linux/arm/v6"
        ;;
    *)
        BASE_TAGS="${BASE_TAGS}-amd64"
        PLATFORM="linux/amd64"
        ;;
esac

# Be verbose on what is being executed
set -x

# Build the image with podman
# We use the docker format here since we are using `SHELL`, which is not supported by OCI
# shellcheck disable=SC2086
podman buildx build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
    --format=docker \
    "${LABEL_ARGS[@]}" \
    "${BUILD_ARGS[@]}" \
    --file="${BASEDIR}/${DOCKERFILE}" "$@" \
    "${BASEDIR}/.."
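A usage sketch with illustrative values:

```bash
# Build an Alpine arm64 image with only the sqlite feature enabled
DB=sqlite ./docker/podman-bake.sh alpine-arm64

# Arguments after the target are appended to the `podman buildx build` call
./docker/podman-bake.sh debian --no-cache
```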
@@ -1,17 +1,31 @@
 #!/usr/bin/env python3

-import os, argparse, json
+import os
+import argparse
+import json
+
+import yaml
 import jinja2

+# Load settings file
+with open("DockerSettings.yaml", 'r') as yaml_file:
+    yaml_data = yaml.safe_load(yaml_file)
+
+settings_env = jinja2.Environment(
+    loader=jinja2.FileSystemLoader(os.getcwd()),
+)
+settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data))
+
 args_parser = argparse.ArgumentParser()
 args_parser.add_argument('template_file', help='Jinja2 template file to render.')
 args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
 cli_args = args_parser.parse_args()

+# Merge the default config yaml with the json arguments given.
 render_vars = json.loads(cli_args.render_vars)
+settings_yaml.update(render_vars)

 environment = jinja2.Environment(
     loader=jinja2.FileSystemLoader(os.getcwd()),
     trim_blocks=True,
 )
-print(environment.get_template(cli_args.template_file).render(render_vars))
+print(environment.get_template(cli_args.template_file).render(settings_yaml))
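The renderer is presumably invoked from the docker directory roughly like this (the JSON value is an assumed example; the real keys live in DockerSettings.yaml and Dockerfile.j2):

```bash
./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
```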
@@ -1,20 +0,0 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix
51
hooks/build
@@ -1,51 +0,0 @@
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildkit_suffix=.buildkit
fi

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done
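These hooks can also be exercised locally by supplying the variables Docker Hub would otherwise set (the repo and tag below are placeholders):

```bash
DOCKER_REPO=vaultwarden/server DOCKER_TAG=testing ./hooks/build
```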
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
111
hooks/push
@@ -1,111 +0,0 @@
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }

set -ex

echo ">>> Starting local Docker registry when needed..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
# First check if there already is a registry container running, else skip it.
# This will only happen either locally or running it via Github Actions
#
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
    # defaults to port 5000
    docker run -d --name registry --network host registry:2
fi

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
# Check if there already is a builder running, else skip this and use the existing.
# This will only happen either locally or running it via Github Actions
#
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
    docker buildx create --name builder --use --driver-opt network=host
fi

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platform="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx
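Once the manifest list has been pushed, it can be checked with the standard tooling (the tag is an example):

```bash
docker manifest inspect docker.io/vaultwarden/server:testing
```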
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
MODIFY master_password_hash TEXT;

ALTER TABLE auth_requests
MODIFY enc_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
2
migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql
Normal file
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN `key` TEXT;
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
ALTER COLUMN master_password_hash DROP NOT NULL;

ALTER TABLE auth_requests
ALTER COLUMN enc_key DROP NOT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1,29 @@
-- Create new auth_requests table with master_password_hash as nullable column
CREATE TABLE auth_requests_new (
    uuid TEXT NOT NULL PRIMARY KEY,
    user_uuid TEXT NOT NULL,
    organization_uuid TEXT,
    request_device_identifier TEXT NOT NULL,
    device_type INTEGER NOT NULL,
    request_ip TEXT NOT NULL,
    response_device_id TEXT,
    access_code TEXT NOT NULL,
    public_key TEXT NOT NULL,
    enc_key TEXT,
    master_password_hash TEXT,
    approved BOOLEAN,
    creation_date DATETIME NOT NULL,
    response_date DATETIME,
    authentication_date DATETIME,
    FOREIGN KEY (user_uuid) REFERENCES users (uuid),
    FOREIGN KEY (organization_uuid) REFERENCES organizations (uuid)
);

-- Transfer current data to new table
INSERT INTO auth_requests_new SELECT * FROM auth_requests;

-- Drop the old table
DROP TABLE auth_requests;

-- Rename the new table to the original name
ALTER TABLE auth_requests_new RENAME TO auth_requests;
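SQLite's ALTER TABLE cannot drop a NOT NULL constraint, hence the create/copy/drop/rename sequence above. One way to confirm the rebuilt schema afterwards (the database path is the usual default; adjust as needed):

```bash
sqlite3 /data/db.sqlite3 '.schema auth_requests'
```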
@@ -0,0 +1,2 @@
-- Add the external_id to the users_organizations table
ALTER TABLE "users_organizations" ADD COLUMN "external_id" TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -1 +0,0 @@
1.71.1

4
rust-toolchain.toml
Normal file
@@ -0,0 +1,4 @@
[toolchain]
channel = "1.73.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
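With the TOML variant in place, rustup resolves the pinned channel and components automatically inside the repository:

```bash
rustup show active-toolchain
```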
@@ -184,12 +184,11 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
     let claims = generate_admin_claims();
     let jwt = encode_jwt(&claims);

-    let cookie = Cookie::build(COOKIE_NAME, jwt)
+    let cookie = Cookie::build((COOKIE_NAME, jwt))
         .path(admin_path())
         .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
         .same_site(SameSite::Strict)
-        .http_only(true)
-        .finish();
+        .http_only(true);

     cookies.add(cookie);
     if let Some(redirect) = redirect {
@@ -279,12 +278,11 @@ async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
 #[post("/invite", data = "<data>")]
 async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
-    let email = data.email.clone();
     if User::find_by_mail(&data.email, &mut conn).await.is_some() {
         err_code!("User already exists", Status::Conflict.code)
     }

-    let mut user = User::new(email);
+    let mut user = User::new(data.email);

 async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
     if CONFIG.mail_enabled() {
|
|||||||
|
|
||||||
#[get("/logout")]
|
#[get("/logout")]
|
||||||
fn logout(cookies: &CookieJar<'_>) -> Redirect {
|
fn logout(cookies: &CookieJar<'_>) -> Redirect {
|
||||||
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
|
cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
|
||||||
Redirect::to(admin_path())
|
Redirect::to(admin_path())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -326,6 +324,10 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
         let mut usr = u.to_json(&mut conn).await;
         usr["UserEnabled"] = json!(u.enabled);
         usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["LastActive"] = match u.last_active(&mut conn).await {
+            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
+            None => json!(None::<String>),
+        };
         users_json.push(usr);
     }
@@ -783,16 +785,16 @@ impl<'r> FromRequest<'r> for AdminToken {
                 if requested_page.is_empty() {
                     return Outcome::Forward(Status::Unauthorized);
                 } else {
-                    return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
+                    return Outcome::Error((Status::Unauthorized, "Unauthorized"));
                 }
             }
         };

         if decode_admin(access_token).is_err() {
             // Remove admin cookie
-            cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+            cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
             error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
-            return Outcome::Failure((Status::Unauthorized, "Session expired"));
+            return Outcome::Error((Status::Unauthorized, "Session expired"));
         }

         Outcome::Success(Self {
@@ -6,7 +6,7 @@ use serde_json::Value;
 use crate::{
     api::{
         core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
-        JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
+        JsonUpcase, Notify, NumberOrString, PasswordOrOtpData, UpdateType,
     },
     auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
     crypto,
@@ -346,7 +346,7 @@ async fn post_password(

     let save_result = user.save(&mut conn).await;

-    // Prevent loging out the client where the user requested this endpoint from.
+    // Prevent logging out the client where the user requested this endpoint from.
     // If you do logout the user it will causes issues at the client side.
     // Adding the device uuid will prevent this.
     nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -493,7 +493,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D

     let save_result = user.save(&mut conn).await;

-    // Prevent loging out the client where the user requested this endpoint from.
+    // Prevent logging out the client where the user requested this endpoint from.
     // If you do logout the user it will causes issues at the client side.
     // Adding the device uuid will prevent this.
     nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -503,17 +503,15 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
 
 #[post("/accounts/security-stamp", data = "<data>")]
 async fn post_sstamp(
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     Device::delete_all_by_user(&user.uuid, &mut conn).await?;
     user.reset_security_stamp();
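This is the pattern repeated throughout the changeset: `PasswordData`, which always demanded `MasterPasswordHash`, is replaced by `PasswordOrOtpData`, whose `validate` accepts either the master-password hash or a one-time code issued via the new protected-actions endpoints. The struct itself is defined outside this excerpt; a sketch of what it plausibly looks like, inferred from the call sites in these hunks (the `true`/`false` second argument matching the "consume the OTP once used" behaviour):

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct PasswordOrOtpData {
    pub MasterPasswordHash: Option<String>,
    pub Otp: Option<String>,
}

impl PasswordOrOtpData {
    /// Validates either the master password hash or an emailed OTP.
    /// `delete_if_valid` consumes the OTP on success so it cannot be replayed.
    pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
        use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;

        match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
            (Some(pw_hash), None) => {
                if !user.check_valid_password(pw_hash) {
                    err!("Invalid password");
                }
            }
            (None, Some(otp)) => {
                validate_protected_action_otp(otp, &user.uuid, delete_if_valid, conn).await?;
            }
            _ => err!("No validation provided"),
        }
        Ok(())
    }
}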
@@ -533,6 +531,10 @@ struct EmailTokenData {
 
 #[post("/accounts/email-token", data = "<data>")]
 async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: EmailTokenData = data.into_inner().data;
     let mut user = headers.user;
 
@@ -579,6 +581,10 @@ async fn post_email(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: ChangeEmailData = data.into_inner().data;
     let mut user = headers.user;
 
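Both email-change entry points now honour the new `EMAIL_CHANGE_ALLOWED` setting (default `true`), letting an administrator pin addresses globally, for instance when they are synced from a directory. The getter is generated by the config macro in `src/config.rs`; the entry presumably looks something like this (sketch, the exact wording lives in the actual file):

// Inside the make_config! { ... settings { ... } } block:
/// Allow email change |> Controls whether users can change their email.
/// This setting applies globally to all users.
email_change_allowed: bool, true, def, true;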
@@ -728,18 +734,16 @@ async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut
 }
 
 #[post("/accounts/delete", data = "<data>")]
-async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> EmptyResult {
     delete_account(data, headers, conn).await
 }
 
 #[delete("/accounts", data = "<data>")]
-async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
+async fn delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     user.delete(&mut conn).await
 }
@@ -846,20 +850,13 @@ fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers
     Ok(())
 }
 
-async fn _api_key(
-    data: JsonUpcase<SecretVerificationRequest>,
-    rotate: bool,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
+async fn _api_key(data: JsonUpcase<PasswordOrOtpData>, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult {
     use crate::util::format_date;
 
-    let data: SecretVerificationRequest = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     if rotate || user.api_key.is_none() {
         user.api_key = Some(crypto::generate_api_key());
@@ -874,12 +871,12 @@ async fn _api_key(
 }
 
 #[post("/accounts/api-key", data = "<data>")]
-async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
     _api_key(data, false, headers, conn).await
 }
 
 #[post("/accounts/rotate-api-key", data = "<data>")]
-async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn rotate_api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
     _api_key(data, true, headers, conn).await
 }
 
@@ -913,26 +910,23 @@ impl<'r> FromRequest<'r> for KnownDevice {
             let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
                 Ok(bytes) => bytes,
                 Err(_) => {
-                    return Outcome::Failure((
-                        Status::BadRequest,
-                        "X-Request-Email value failed to decode as base64url",
-                    ));
+                    return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
                 }
             };
             match String::from_utf8(email_bytes) {
                 Ok(email) => email,
                 Err(_) => {
-                    return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
+                    return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
                 }
             }
         } else {
-            return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
+            return Outcome::Error((Status::BadRequest, "X-Request-Email value is required"));
         };
 
         let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
             uuid.to_string()
         } else {
-            return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
+            return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
         };
 
         Outcome::Success(KnownDevice {
@@ -970,10 +964,10 @@ async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Head
         device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
     }
     if let Err(e) = device.save(&mut conn).await {
-        err!(format!("An error occured while trying to save the device push token: {e}"));
+        err!(format!("An error occurred while trying to save the device push token: {e}"));
     }
     if let Err(e) = register_push_device(headers.user.uuid, device).await {
-        err!(format!("An error occured while proceeding registration of a device: {e}"));
+        err!(format!("An error occurred while proceeding registration of a device: {e}"));
     }
 
     Ok(())
@@ -1090,7 +1084,7 @@ async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
 struct AuthResponseRequest {
     deviceIdentifier: String,
     key: String,
-    masterPasswordHash: String,
+    masterPasswordHash: Option<String>,
     requestApproved: bool,
 }
 
@@ -1111,7 +1105,7 @@ async fn put_auth_request(
     };
 
     auth_request.approved = Some(data.requestApproved);
-    auth_request.enc_key = data.key;
+    auth_request.enc_key = Some(data.key);
     auth_request.master_password_hash = data.masterPasswordHash;
     auth_request.response_device_id = Some(data.deviceIdentifier.clone());
     auth_request.save(&mut conn).await?;
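`masterPasswordHash` becomes optional on auth-request approvals (and `enc_key` turns into an `Option` in the model) because a device approving a login-with-device request does not necessarily hold the hash. With serde, an `Option` field maps a missing JSON key onto `None`, so old and new clients both deserialize. A hypothetical check, not part of the PR:

// AuthResponseRequest as declared in the hunk above; Deserialize is derived
// in the source file.
let body = r#"{"deviceIdentifier":"dev-1","key":"enc-key","requestApproved":true}"#;
let req: AuthResponseRequest = serde_json::from_str(body).unwrap();
assert!(req.masterPasswordHash.is_none()); // absent key -> None, no error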
@@ -10,7 +10,7 @@ use rocket::{
 use serde_json::Value;
 
 use crate::{
-    api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
+    api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
     auth::Headers,
     crypto,
     db::{models::*, DbConn, DbPool},
@@ -206,12 +206,13 @@ pub struct CipherData {
     // TODO: Some of these might appear all the time, no need for Option
     OrganizationId: Option<String>,
 
+    Key: Option<String>,
+
     /*
     Login = 1,
     SecureNote = 2,
     Card = 3,
-    Identity = 4,
-    Fido2Key = 5
+    Identity = 4
     */
     pub Type: i32,
     pub Name: String,
@@ -223,7 +224,6 @@ pub struct CipherData {
     SecureNote: Option<Value>,
     Card: Option<Value>,
     Identity: Option<Value>,
-    Fido2Key: Option<Value>,
 
     Favorite: Option<bool>,
     Reprompt: Option<i32>,
@@ -359,14 +359,17 @@ pub async fn update_cipher_from_data(
     enforce_personal_ownership_policy(Some(&data), headers, conn).await?;
 
     // Check that the client isn't updating an existing cipher with stale data.
-    if let Some(dt) = data.LastKnownRevisionDate {
-        match NaiveDateTime::parse_from_str(&dt, "%+") {
-            // ISO 8601 format
-            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
-            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
-                err!("The client copy of this cipher is out of date. Resync the client and try again.")
+    // And only perform this check when not importing ciphers, else the date/time check will fail.
+    if ut != UpdateType::None {
+        if let Some(dt) = data.LastKnownRevisionDate {
+            match NaiveDateTime::parse_from_str(&dt, "%+") {
+                // ISO 8601 format
+                Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+                Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
+                    err!("The client copy of this cipher is out of date. Resync the client and try again.")
+                }
+                Ok(_) => (),
             }
-            Ok(_) => (),
         }
     }
 
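The staleness check is now skipped when `ut == UpdateType::None`, i.e. during imports: imported ciphers carry revision dates from the exporting server, so comparing them against `updated_at` on freshly created rows would reject the whole import. The `%+` format string used here is chrono's ISO 8601 specifier; a tiny illustration of the parse the guard relies on (timestamp made up):

use chrono::NaiveDateTime;

// "%+" accepts the ISO 8601 timestamps Bitwarden clients send.
let dt = NaiveDateTime::parse_from_str("2023-10-21T10:28:00.000000Z", "%+");
assert!(dt.is_ok());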
@@ -466,7 +469,6 @@ pub async fn update_cipher_from_data(
         2 => data.SecureNote,
         3 => data.Card,
         4 => data.Identity,
-        5 => data.Fido2Key,
         _ => err!("Invalid type"),
     };
 
@@ -483,6 +485,7 @@ pub async fn update_cipher_from_data(
         None => err!("Data missing"),
     };
 
+    cipher.key = data.Key;
     cipher.name = data.Name;
     cipher.notes = data.Notes;
     cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
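`cipher.key` is the server-side half of individual cipher key encryption, the 2023.9.1 client feature advertised further down in `config()`: each cipher may now carry its own encryption key, wrapped client-side by the user's or organization's symmetric key, and the server stores it opaquely. The model and migration are outside this excerpt; a sketch of the kind of change implied (assumed shape, not the actual file):

// db/models/cipher.rs (sketch): one new nullable column, passed through
// verbatim -- the server never decrypts it.
pub struct Cipher {
    // ...existing fields...
    pub key: Option<String>, // per-cipher key, encrypted client-side
}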
@@ -1454,19 +1457,15 @@ struct OrganizationId {
 #[post("/ciphers/purge?<organization..>", data = "<data>")]
 async fn delete_all(
     organization: Option<OrganizationId>,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
+    let data: PasswordOrOtpData = data.into_inner().data;
 
     let mut user = headers.user;
 
-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password")
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     match organization {
         Some(org_data) => {
@@ -1752,7 +1751,7 @@ impl CipherSyncData {
         let cipher_folders: HashMap<String, String>;
         let cipher_favorites: HashSet<String>;
         match sync_type {
-            // User Sync supports Folders and Favorits
+            // User Sync supports Folders and Favorites
            CipherSyncType::User => {
                 // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
                 cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect();
@@ -1760,7 +1759,7 @@ impl CipherSyncData {
                 // Generate a HashSet of all the Cipher UUID's which are marked as favorite
                 cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect();
             }
-            // Organization Sync does not support Folders and Favorits.
+            // Organization Sync does not support Folders and Favorites.
             // If these are set, it will cause issues in the web-vault.
             CipherSyncType::Organization => {
                 cipher_folders = HashMap::with_capacity(0);
@@ -1805,7 +1804,7 @@ impl CipherSyncData {
             .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
             .collect();
 
-        // Get all organizations that the user has full access to via group assignement
+        // Get all organizations that the user has full access to via group assignment
         let user_group_full_access_for_organizations: HashSet<String> =
             Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect();
 
@@ -319,7 +319,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
     let claims = decode_emergency_access_invite(token)?;
 
     // This can happen if the user who received the invite used a different email to signup.
-    // Since we do not know if this is intented, we error out here and do nothing with the invite.
+    // Since we do not know if this is intended, we error out here and do nothing with the invite.
     if claims.email != headers.user.email {
         err!("Claim email does not match current users email")
     }
@@ -194,11 +194,17 @@ fn version() -> Json<&'static str> {
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
     Json(json!({
-        "version": crate::VERSION,
+        // Note: The clients use this version to handle backwards compatibility concerns
+        // This means they expect a version that closely matches the Bitwarden server version
+        // We should make sure that we keep this updated when we support the new server features
+        // Version history:
+        // - Individual cipher key encryption: 2023.9.1
+        "version": "2023.9.1",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
-            "url": "https://github.com/dani-garcia/vaultwarden"
+            "url": "https://github.com/dani-garcia/vaultwarden",
+            "version": crate::VERSION
         },
         "environment": {
             "vault": domain,
@@ -207,6 +213,13 @@ fn config() -> Json<Value> {
             "notifications": format!("{domain}/notifications"),
             "sso": "",
         },
+        "featureStates": {
+            // Any feature flags that we want the clients to use
+            // Can check the enabled ones at:
+            // https://vault.bitwarden.com/api/config
+            "fido2-vault-credentials": true, // Passkey support
+            "autofill-v2": false, // Disabled because it is causing issues https://github.com/dani-garcia/vaultwarden/discussions/4052
+        },
         "object": "config",
     }))
 }
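Two things change in the client-facing config payload: the top-level `version` is pinned to the Bitwarden server release whose feature set Vaultwarden currently matches (the real build version moves under `server.version`), and a `featureStates` map advertises flags such as passkey support. A quick probe of a running instance would then see roughly this (illustration only; reqwest is not a dependency of this crate and the host is a stand-in):

fn check_config() -> reqwest::Result<()> {
    let cfg: serde_json::Value =
        reqwest::blocking::get("https://vw.example.com/api/config")?.json()?;
    assert_eq!(cfg["version"], "2023.9.1");
    assert_eq!(cfg["featureStates"]["fido2-vault-credentials"], true);
    Ok(())
}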
@@ -6,7 +6,8 @@ use serde_json::Value;
 use crate::{
     api::{
         core::{log_event, CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData, UpdateType,
+        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordOrOtpData,
+        UpdateType,
     },
     auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
     db::{models::*, DbConn},
@@ -186,16 +187,13 @@ async fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, mut co
 #[delete("/organizations/<org_id>", data = "<data>")]
 async fn delete_organization(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: OwnerHeaders,
     mut conn: DbConn,
 ) -> EmptyResult {
-    let data: PasswordData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
+    let data: PasswordOrOtpData = data.into_inner().data;
 
-    if !headers.user.check_valid_password(&password_hash) {
-        err!("Invalid password")
-    }
+    data.validate(&headers.user, true, &mut conn).await?;
 
     match Organization::find_by_uuid(org_id, &mut conn).await {
         None => err!("Organization not found"),
@@ -206,7 +204,7 @@ async fn delete_organization(
 #[post("/organizations/<org_id>/delete", data = "<data>")]
 async fn post_delete_organization(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: OwnerHeaders,
     conn: DbConn,
 ) -> EmptyResult {
@@ -1520,9 +1518,9 @@ async fn bulk_public_keys(
     let data: OrgBulkIds = data.into_inner().data;
 
     let mut bulk_response = Vec::new();
-    // Check all received UserOrg UUID's and find the matching User to retreive the public-key.
+    // Check all received UserOrg UUID's and find the matching User to retrieve the public-key.
     // If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID.
-    // The web-vault will then ignore that user for the folowing steps.
+    // The web-vault will then ignore that user for the following steps.
     for user_org_id in data.Ids {
         match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await {
             Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await {
@@ -1882,7 +1880,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
     // This means that this endpoint can end up removing users that were added manually by an admin,
     // as opposed to upstream which only removes auto-imported users.
 
-    // User needs to be admin or owner to use the Directry Connector
+    // User needs to be admin or owner to use the Directory Connector
     match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
         Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
         Some(_) => err!("User has insufficient permissions to use Directory Connector"),
@@ -2897,7 +2895,7 @@ async fn put_reset_password_enrollment(
 
 // This is a new function active since the v2022.9.x clients.
 // It combines the previous two calls done before.
-// We call those two functions here and combine them our selfs.
+// We call those two functions here and combine them ourselves.
 //
 // NOTE: It seems clients can't handle uppercase-first keys!!
 // We need to convert all keys so they have the first character to be a lowercase.
@@ -2945,18 +2943,16 @@ async fn get_org_export(org_id: &str, headers: AdminHeaders, mut conn: DbConn) -
 
 async fn _api_key(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     rotate: bool,
     headers: AdminHeaders,
-    conn: DbConn,
+    mut conn: DbConn,
 ) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;
 
-    // Validate the admin users password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password")
-    }
+    // Validate the admin users password/otp
+    data.validate(&user, true, &mut conn).await?;
 
     let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
         Some(mut org_api_key) => {
@@ -2983,14 +2979,14 @@ async fn _api_key(
 }
 
 #[post("/organizations/<org_id>/api-key", data = "<data>")]
-async fn api_key(org_id: &str, data: JsonUpcase<PasswordData>, headers: AdminHeaders, conn: DbConn) -> JsonResult {
+async fn api_key(org_id: &str, data: JsonUpcase<PasswordOrOtpData>, headers: AdminHeaders, conn: DbConn) -> JsonResult {
     _api_key(org_id, data, false, headers, conn).await
 }
 
 #[post("/organizations/<org_id>/rotate-api-key", data = "<data>")]
 async fn rotate_api_key(
     org_id: &str,
-    data: JsonUpcase<PasswordData>,
+    data: JsonUpcase<PasswordOrOtpData>,
     headers: AdminHeaders,
     conn: DbConn,
 ) -> JsonResult {
@@ -56,16 +56,34 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             if let Some(mut user_org) =
                 UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
             {
-                user_org.revoke();
-                user_org.save(&mut conn).await?;
-            }
+                // Only revoke a user if it is not the last confirmed owner
+                let revoked = if user_org.atype == UserOrgType::Owner
+                    && user_org.status == UserOrgStatus::Confirmed as i32
+                {
+                    if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await
+                        <= 1
+                    {
+                        warn!("Can't revoke the last owner");
+                        false
+                    } else {
+                        user_org.revoke()
+                    }
+                } else {
+                    user_org.revoke()
+                };
+
+                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                if revoked || ext_modified {
+                    user_org.save(&mut conn).await?;
+                }
+            }
         // If user is part of the organization, restore it
         } else if let Some(mut user_org) =
             UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
         {
-            if user_org.status < UserOrgStatus::Revoked as i32 {
-                user_org.restore();
+            let restored = user_org.restore();
+            let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+            if restored || ext_modified {
                 user_org.save(&mut conn).await?;
             }
         } else {
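Two behavioural fixes hide in this hunk: a directory sync can no longer revoke the last confirmed owner of an organization, and `revoke()` / `restore()` / `set_external_id()` now report whether they actually changed anything, so the row is only saved when needed. A sketch of that changed-flag idiom as the model methods presumably implement it (the actual code lives in `src/db/models/organization.rs`):

impl UserOrganization {
    /// Returns true when the status actually changed.
    pub fn revoke(&mut self) -> bool {
        if self.status > UserOrgStatus::Revoked as i32 {
            self.status = UserOrgStatus::Revoked as i32;
            true
        } else {
            false
        }
    }

    /// Returns true when the external id actually changed.
    pub fn set_external_id(&mut self, external_id: Option<String>) -> bool {
        if self.external_id != external_id {
            self.external_id = external_id;
            true
        } else {
            false
        }
    }
}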
@@ -73,9 +91,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
-                    // doesn't exist in vaultwarden
+                    // User does not exist yet
                     let mut new_user = User::new(user_data.Email.clone());
-                    new_user.set_external_id(Some(user_data.ExternalId.clone()));
                     new_user.save(&mut conn).await?;
 
                     if !CONFIG.mail_enabled() {
@@ -92,6 +109,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             };
 
             let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
+            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
             new_org_user.access_all = false;
             new_org_user.atype = UserOrgType::User as i32;
             new_org_user.status = user_org_status;
@@ -132,12 +150,10 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
 
             for ext_id in &group_data.MemberExternalIds {
-                if let Some(user) = User::find_by_external_id(ext_id, &mut conn).await {
-                    if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await
-                    {
-                        let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
-                        group_user.save(&mut conn).await?;
-                    }
+                if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
+                {
+                    let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
+                    group_user.save(&mut conn).await?;
                 }
             }
         }
@@ -150,10 +166,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
     // Generate a HashSet to quickly verify if a member is listed or not.
     let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
     for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
-        if let Some(user_external_id) =
-            User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.external_id)
-        {
-            if user_external_id.is_some() && !sync_members.contains(&user_external_id.unwrap()) {
+        if let Some(ref user_external_id) = user_org.external_id {
+            if !sync_members.contains(user_external_id) {
                 if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
                     // Removing owner, check that there is at least one other confirmed owner
                     if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
@@ -340,9 +340,13 @@ async fn post_send_file_v2_data(
 
     let mut data = data.into_inner();
 
-    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { err!("Send not found. Unable to save the file.") };
+    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else {
+        err!("Send not found. Unable to save the file.")
+    };
 
-    let Some(send_user_id) = &send.user_uuid else {err!("Sends are only supported for users at the moment")};
+    let Some(send_user_id) = &send.user_uuid else {
+        err!("Sends are only supported for users at the moment")
+    };
     if send_user_id != &headers.user.uuid {
         err!("Send doesn't belong to user");
     }
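This hunk is formatting only: rustfmt only recently learned to format `let ... else` statements (around Rust 1.72, which fits the `rust-toolchain.toml` bump elsewhere in this changeset), so the previously hand-packed one-liners get the canonical multi-line layout. The construct binds on the happy path and must diverge otherwise, e.g.:

fn first_word(s: &str) -> &str {
    // let-else: bind `word`, or diverge (return/break/continue/panic).
    let Some(word) = s.split_whitespace().next() else {
        return "";
    };
    word
}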
@@ -5,7 +5,7 @@ use rocket::Route;
 use crate::{
     api::{
         core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
-        NumberOrString, PasswordData,
+        NumberOrString, PasswordOrOtpData,
     },
     auth::{ClientIp, Headers},
     crypto,
@@ -22,13 +22,11 @@ pub fn routes() -> Vec<Route> {
 }
 
 #[post("/two-factor/get-authenticator", data = "<data>")]
-async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;
 
     let type_ = TwoFactorType::Authenticator as i32;
     let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;
@@ -48,9 +46,10 @@ async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct EnableAuthenticatorData {
-    MasterPasswordHash: String,
     Key: String,
     Token: NumberOrString,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }
 
 #[post("/two-factor/authenticator", data = "<data>")]
@@ -60,15 +59,17 @@ async fn activate_authenticator(
     mut conn: DbConn,
 ) -> JsonResult {
     let data: EnableAuthenticatorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
     let key = data.Key;
     let token = data.Token.into_string();
 
     let mut user = headers.user;
 
-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;
 
     // Validate key as base32 and 20 bytes length
     let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
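Endpoints whose payload carries extra fields (here the TOTP `Key` and `Token`) build the `PasswordOrOtpData` inline from the now-optional fields and validate it the same way, so the client may send either credential. A fragment with made-up values, purely illustrative:

// Either field may be present; both absent (or both set) is rejected
// by validate(). The `true` argument consumes the OTP on success.
let check = PasswordOrOtpData {
    MasterPasswordHash: None,
    Otp: Some("98765432".to_string()),
};
// check.validate(&user, true, &mut conn).await?;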
@@ -177,7 +178,7 @@ pub async fn validate_totp_code(
         }
     }
 
-    // Else no valide code received, deny access
+    // Else no valid code received, deny access
     err!(
         format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
         ErrorEvent {
@@ -6,7 +6,7 @@ use rocket::Route;
 use crate::{
     api::{
         core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
-        PasswordData,
+        PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
@@ -92,14 +92,13 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
 
 #[post("/two-factor/get-duo", data = "<data>")]
-async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
+    let user = headers.user;
 
-    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;
 
-    let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;
+    let data = get_user_duo_data(&user.uuid, &mut conn).await;
 
     let (enabled, data) = match data {
         DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -129,10 +128,11 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC
 #[derive(Deserialize)]
 #[allow(non_snake_case, dead_code)]
 struct EnableDuoData {
-    MasterPasswordHash: String,
     Host: String,
     SecretKey: String,
     IntegrationKey: String,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }
 
 impl From<EnableDuoData> for DuoData {
@@ -159,9 +159,12 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
     let data: EnableDuoData = data.into_inner().data;
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash.clone(),
+        Otp: data.Otp.clone(),
     }
+    .validate(&user, true, &mut conn)
+    .await?;
 
     let (data, data_str) = if check_duo_fields_custom(&data) {
         let data_req: DuoData = data.into();
@@ -5,7 +5,7 @@ use rocket::Route;
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
@@ -76,13 +76,11 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
 
 /// When user clicks on Manage email 2FA show the user the related information
 #[post("/two-factor/get-email", data = "<data>")]
-async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;
 
     let (enabled, mfa_email) =
         match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
@@ -105,7 +103,8 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
 struct SendEmailData {
     /// Email where 2FA codes will be sent to, can be different than user email account.
     Email: String,
-    MasterPasswordHash: String,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }
 
 /// Send a verification email to the specified email address to check whether it exists/belongs to user.
@@ -114,9 +113,12 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
     let data: SendEmailData = data.into_inner().data;
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
     }
+    .validate(&user, false, &mut conn)
+    .await?;
 
     if !CONFIG._enable_email_2fa() {
         err!("Email 2FA is disabled")
@@ -144,8 +146,9 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
 #[allow(non_snake_case)]
 struct EmailData {
     Email: String,
-    MasterPasswordHash: String,
     Token: String,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }
 
 /// Verify email belongs to user and can be used for 2FA email codes.
@@ -154,9 +157,13 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
     let data: EmailData = data.into_inner().data;
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    // This is the last step in the verification process, delete the otp directly afterwards
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;
 
     let type_ = TwoFactorType::EmailVerificationChallenge as i32;
     let mut twofactor =
@@ -5,7 +5,7 @@ use rocket::Route;
 use serde_json::Value;
 
 use crate::{
-    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
+    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData},
     auth::{ClientHeaders, Headers},
     crypto,
     db::{models::*, DbConn, DbPool},
@@ -15,6 +15,7 @@ use crate::{
 pub mod authenticator;
 pub mod duo;
 pub mod email;
+pub mod protected_actions;
 pub mod webauthn;
 pub mod yubikey;
 
@@ -33,6 +34,7 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut email::routes());
     routes.append(&mut webauthn::routes());
     routes.append(&mut yubikey::routes());
+    routes.append(&mut protected_actions::routes());
 
     routes
 }
@@ -50,13 +52,11 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
 }
 
 #[post("/two-factor/get-recover", data = "<data>")]
-fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, true, &mut conn).await?;
 
     Ok(Json(json!({
         "Code": user.totp_recover,
@@ -123,19 +123,23 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct DisableTwoFactorData {
-    MasterPasswordHash: String,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
     Type: NumberOrString,
 }
 
 #[post("/two-factor/disable", data = "<data>")]
 async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     let data: DisableTwoFactorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
     let user = headers.user;
 
-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
+    // Delete directly after a valid token has been provided
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash,
+        Otp: data.Otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;
 
     let type_ = data.Type.into_i32()?;
 
142 src/api/core/two_factor/protected_actions.rs (new file)
@@ -0,0 +1,142 @@
+use chrono::{Duration, NaiveDateTime, Utc};
+use rocket::Route;
+
+use crate::{
+    api::{EmptyResult, JsonUpcase},
+    auth::Headers,
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::{Error, MapResult},
+    mail, CONFIG,
+};
+
+pub fn routes() -> Vec<Route> {
+    routes![request_otp, verify_otp]
+}
+
+/// Data stored in the TwoFactor table in the db
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ProtectedActionData {
+    /// Token issued to validate the protected action
+    pub token: String,
+    /// UNIX timestamp of token issue.
+    pub token_sent: i64,
+    // The total amount of attempts
+    pub attempts: u8,
+}
+
+impl ProtectedActionData {
+    pub fn new(token: String) -> Self {
+        Self {
+            token,
+            token_sent: Utc::now().naive_utc().timestamp(),
+            attempts: 0,
+        }
+    }
+
+    pub fn to_json(&self) -> String {
+        serde_json::to_string(&self).unwrap()
+    }
+
+    pub fn from_json(string: &str) -> Result<Self, Error> {
+        let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
+        match res {
+            Ok(x) => Ok(x),
+            Err(_) => err!("Could not decode ProtectedActionData from string"),
+        }
+    }
+
+    pub fn add_attempt(&mut self) {
+        self.attempts += 1;
+    }
+}
+
+#[post("/accounts/request-otp")]
+async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() {
+        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
+    }
+
+    let user = headers.user;
+
+    // Only one Protected Action per user is allowed to take place, delete the previous one
+    if let Some(pa) =
+        TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &mut conn).await
+    {
+        pa.delete(&mut conn).await?;
+    }
+
+    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
+    let pa_data = ProtectedActionData::new(generated_token);
+
+    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
+    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::ProtectedActions, pa_data.to_json());
+    twofactor.save(&mut conn).await?;
+
+    mail::send_protected_action_token(&user.email, &pa_data.token).await?;
+
+    Ok(())
+}
+
+#[derive(Deserialize, Serialize, Debug)]
+#[allow(non_snake_case)]
+struct ProtectedActionVerify {
+    OTP: String,
+}
+
+#[post("/accounts/verify-otp", data = "<data>")]
+async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() {
+        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
+    }
+
+    let user = headers.user;
+    let data: ProtectedActionVerify = data.into_inner().data;
+
+    // Delete the token after one validation attempt
+    // This endpoint only gets called for the vault export, and doesn't need a second attempt
+    validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await
+}
+
+pub async fn validate_protected_action_otp(
+    otp: &str,
+    user_uuid: &str,
+    delete_if_valid: bool,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
+        .await
+        .map_res("Protected action token not found, try sending the code again or restart the process")?;
+    let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
+
+    pa_data.add_attempt();
+    // Delete the token after x attempts if it has been used too many times
+    // We use the 6, which should be more then enough for invalid attempts and multiple valid checks
+    if pa_data.attempts > 6 {
+        pa.delete(conn).await?;
+        err!("Token has expired")
+    }
+
+    // Check if the token has expired (Using the email 2fa expiration time)
+    let date =
+        NaiveDateTime::from_timestamp_opt(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.");
+    let max_time = CONFIG.email_expiration_time() as i64;
+    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+        pa.delete(conn).await?;
+        err!("Token has expired")
+    }
+
+    if !crypto::ct_eq(&pa_data.token, otp) {
+        pa.save(conn).await?;
+        err!("Token is invalid")
+    }
+
+    if delete_if_valid {
+        pa.delete(conn).await?;
+    }
+
+    Ok(())
+}
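This new module is what makes the OTP half of `PasswordOrOtpData` work end to end: `POST /accounts/request-otp` mails a short-lived token (stored in the existing `two_factor` table under the new `ProtectedActions` type), and each protected endpoint then verifies it via `validate_protected_action_otp`, with an attempt counter and the email-2FA expiration window bounding brute force. A sketch of the client-side flow (base URL, bearer token and the mailbox read are stand-ins; none of this ships with the PR, and reqwest is used only for illustration):

use serde_json::json;

async fn verify_with_otp(base: &str, bearer: &str, otp_from_mail: &str) -> reqwest::Result<()> {
    let client = reqwest::Client::new();

    // 1. Ask the server to mail a one-time code to the account address.
    client.post(format!("{base}/api/accounts/request-otp")).bearer_auth(bearer).send().await?;

    // 2. The user retrieves the code out of band (here passed in as a
    //    parameter); it is then accepted wherever PasswordOrOtpData is:
    client
        .post(format!("{base}/api/accounts/verify-otp"))
        .bearer_auth(bearer)
        .json(&json!({ "OTP": otp_from_mail }))
        .send()
        .await?;
    Ok(())
}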
@@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -103,16 +103,17 @@ impl WebauthnRegistration {
 }
 
 #[post("/two-factor/get-webauthn", data = "<data>")]
-async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }
 
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    let data: PasswordOrOtpData = data.into_inner().data;
+    let user = headers.user;
 
-    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?;
+    data.validate(&user, false, &mut conn).await?;
+
+    let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &mut conn).await?;
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
 
     Ok(Json(json!({
@@ -123,12 +124,17 @@ async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn
 }
 
 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+async fn generate_webauthn_challenge(
+    data: JsonUpcase<PasswordOrOtpData>,
+    headers: Headers,
+    mut conn: DbConn,
+) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner().data;
+    let user = headers.user;
+
+    data.validate(&user, false, &mut conn).await?;
 
-    let registrations = get_webauthn_registrations(&headers.user.uuid, &mut conn)
+    let registrations = get_webauthn_registrations(&user.uuid, &mut conn)
         .await?
         .1
         .into_iter()
@@ -136,16 +142,16 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
         .collect();
 
     let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
-        headers.user.uuid.as_bytes().to_vec(),
-        headers.user.email,
-        headers.user.name,
+        user.uuid.as_bytes().to_vec(),
+        user.email,
+        user.name,
         Some(registrations),
         None,
         None,
     )?;
 
     let type_ = TwoFactorType::WebauthnRegisterChallenge;
-    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
+    TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
 
     let mut challenge_value = serde_json::to_value(challenge.public_key)?;
     challenge_value["status"] = "ok".into();
|
||||||
@@ -158,8 +164,9 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
|
|||||||
struct EnableWebauthnData {
|
struct EnableWebauthnData {
|
||||||
Id: NumberOrString, // 1..5
|
Id: NumberOrString, // 1..5
|
||||||
Name: String,
|
Name: String,
|
||||||
MasterPasswordHash: String,
|
|
||||||
DeviceResponse: RegisterPublicKeyCredentialCopy,
|
DeviceResponse: RegisterPublicKeyCredentialCopy,
|
||||||
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is copied from RegisterPublicKeyCredential to change the Response objects casing
|
// This is copied from RegisterPublicKeyCredential to change the Response objects casing
|
||||||
@@ -246,9 +253,12 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
|
|||||||
let data: EnableWebauthnData = data.into_inner().data;
|
let data: EnableWebauthnData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
PasswordOrOtpData {
|
||||||
err!("Invalid password");
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, true, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Retrieve and delete the saved challenge state
|
// Retrieve and delete the saved challenge state
|
||||||
let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
|
let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
|
||||||
|
@@ -6,7 +6,7 @@ use yubico::{config::Config, verify};
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -24,13 +24,14 @@ pub fn routes() -> Vec<Route> {
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct EnableYubikeyData {
-    MasterPasswordHash: String,
     Key1: Option<String>,
     Key2: Option<String>,
     Key3: Option<String>,
     Key4: Option<String>,
     Key5: Option<String>,
     Nfc: bool,
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
 }

 #[derive(Deserialize, Serialize, Debug)]
@@ -83,16 +84,14 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
 }

 #[post("/two-factor/get-yubikey", data = "<data>")]
-async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Make sure the credentials are set
     get_yubico_credentials()?;

-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner().data;
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let user_uuid = &user.uuid;
     let yubikey_type = TwoFactorType::YubiKey as i32;
@@ -122,9 +121,12 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     let data: EnableYubikeyData = data.into_inner().data;
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        MasterPasswordHash: data.MasterPasswordHash.clone(),
+        Otp: data.Otp.clone(),
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Check if we already have some data
     let mut yubikey_data =
@@ -534,11 +534,11 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let mut referer = String::new();

     if let Ok(content) = resp {
-        // Extract the URL from the respose in case redirects occured (like @ gitlab.com)
+        // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
         let url = content.url().clone();

         // Set the referer to be used on the final request, some sites check this.
-        // Mostly used to prevent direct linking and other security resons.
+        // Mostly used to prevent direct linking and other security reasons.
         referer = url.to_string();

         // Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from.
@@ -638,7 +638,7 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
     }
 }

-/// Returns a Tuple with the width and hight as a seperate value extracted from the sizes attribute
+/// Returns a Tuple with the width and height as a separate value extracted from the sizes attribute
 /// It will return 0 for both values if no match has been found.
 ///
 /// # Arguments
@@ -685,7 +685,9 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {

     for icon in icon_result.iconlist.iter().take(5) {
         if icon.href.starts_with("data:image") {
-            let Ok(datauri) = DataUrl::process(&icon.href) else {continue};
+            let Ok(datauri) = DataUrl::process(&icon.href) else {
+                continue;
+            };
             // Check if we are able to decode the data uri
             let mut body = BytesMut::new();
             match datauri.decode::<_, ()>(|bytes| {
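
The `let … else` rewrites in this and several later hunks are formatting-only: rustfmt gained support for `let … else` statements, and the single-line form is now expanded. A small compilable sketch of the before and after style:

fn parse(input: &str) -> Option<i64> {
    // Old single-line style: let Ok(n) = input.parse::<i64>() else { return None };
    // New multi-line style, as rustfmt emits it:
    let Ok(n) = input.parse::<i64>() else {
        return None;
    };
    Some(n)
}

fn main() {
    assert_eq!(parse("42"), Some(42));
    assert_eq!(parse("x"), None);
}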
@@ -469,7 +469,7 @@ async fn twofactor_auth(
     TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;

     let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
-    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one
+    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one

     let twofactor_code = match data.two_factor_token {
         Some(ref code) => code,
@@ -32,6 +32,7 @@ pub use crate::api::{
     web::routes as web_routes,
     web::static_files,
 };
+use crate::db::{models::User, DbConn};
 use crate::util;

 // Type aliases for API methods results
@@ -46,8 +47,31 @@ type JsonVec<T> = Json<Vec<T>>;
 // Common structs representing JSON data received
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-struct PasswordData {
-    MasterPasswordHash: String,
+struct PasswordOrOtpData {
+    MasterPasswordHash: Option<String>,
+    Otp: Option<String>,
+}
+
+impl PasswordOrOtpData {
+    /// Tokens used via this struct can be used multiple times during the process
+    /// First for the validation to continue, after that to enable or validate the following actions
+    /// This is different per caller, so it can be adjusted to delete the token or not
+    pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
+        use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
+
+        match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
+            (Some(pw_hash), None) => {
+                if !user.check_valid_password(pw_hash) {
+                    err!("Invalid password");
+                }
+            }
+            (None, Some(otp)) => {
+                validate_protected_action_otp(otp, &user.uuid, delete_if_valid, conn).await?;
+            }
+            _ => err!("No validation provided"),
+        }
+        Ok(())
+    }
 }

 #[derive(Deserialize, Debug, Clone)]
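
With the shared `PasswordOrOtpData` struct in place, each protected endpoint accepts either credential in the same request body. A standalone sketch of the two accepted shapes, assuming the `serde` and `serde_json` crates (field casing as produced by `JsonUpcase`):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct PasswordOrOtpData {
    MasterPasswordHash: Option<String>,
    Otp: Option<String>,
}

fn main() {
    // Client re-authenticating with the master password hash:
    let pw: PasswordOrOtpData =
        serde_json::from_str(r#"{"MasterPasswordHash":"hashed..."}"#).unwrap();
    // Client re-authenticating with an emailed protected-action OTP:
    let otp: PasswordOrOtpData = serde_json::from_str(r#"{"Otp":"123456"}"#).unwrap();
    println!("{pw:?} {otp:?}");
}

Supplying both fields, or neither, falls into the `_ => err!("No validation provided")` arm.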
@@ -124,7 +124,9 @@ fn websockets_hub<'r>(
         err_code!("Invalid claim", 401)
     };

-    let Ok(claims) = crate::auth::decode_login(&token) else { err_code!("Invalid token", 401) };
+    let Ok(claims) = crate::auth::decode_login(&token) else {
+        err_code!("Invalid token", 401)
+    };

     let (mut rx, guard) = {
         let users = Arc::clone(&WS_USERS);
@@ -116,7 +116,7 @@ pub async fn unregister_push_device(uuid: String) -> EmptyResult {
     .await
     {
         Ok(r) => r,
-        Err(e) => err!(format!("An error occured during device unregistration: {e}")),
+        Err(e) => err!(format!("An error occurred during device unregistration: {e}")),
     };
     Ok(())
 }
@@ -252,7 +252,7 @@ async fn send_to_push_relay(notification_data: Value) {
     .send()
     .await
     {
-        error!("An error occured while sending a send update to the push relay: {}", e);
+        error!("An error occurred while sending a send update to the push relay: {}", e);
     };
 }

@@ -12,7 +12,7 @@ use crate::{
 };

 pub fn routes() -> Vec<Route> {
-    // If addding more routes here, consider also adding them to
+    // If adding more routes here, consider also adding them to
     // crate::utils::LOGGED_ROUTES to make sure they appear in the log
     let mut routes = routes![attachments, alive, alive_head, static_files];
     if CONFIG.web_vault_enabled() {
@@ -100,7 +100,9 @@ async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {

 #[get("/attachments/<uuid>/<file_id>?<token>")]
 async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option<NamedFile> {
-    let Ok(claims) = decode_file_download(&token) else { return None };
+    let Ok(claims) = decode_file_download(&token) else {
+        return None;
+    };
     if claims.sub != *uuid || claims.file_id != *file_id {
         return None;
     }
@@ -126,7 +126,7 @@ macro_rules! make_config {

         if show_overrides && !overrides.is_empty() {
             // We can't use warn! here because logging isn't setup yet.
-            println!("[WARNING] The following environment variables are being overriden by the config.json file.");
+            println!("[WARNING] The following environment variables are being overridden by the config.json file.");
             println!("[WARNING] Please use the admin panel to make changes to them:");
             println!("[WARNING] {}\n", overrides.join(", "));
         }
@@ -164,7 +164,7 @@ macro_rules! make_config {
         )+)+

         pub fn prepare_json(&self) -> serde_json::Value {
-            let (def, cfg, overriden) = {
+            let (def, cfg, overridden) = {
                 let inner = &self.inner.read().unwrap();
                 (inner._env.build(), inner.config.clone(), inner._overrides.clone())
             };
@@ -211,7 +211,7 @@ macro_rules! make_config {
                 element.insert("default".into(), serde_json::to_value(def.$name).unwrap());
                 element.insert("type".into(), (_get_form_type(stringify!($ty))).into());
                 element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into());
-                element.insert("overridden".into(), (overriden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
+                element.insert("overridden".into(), (overridden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
                 element
             }),
         )+
@@ -480,6 +480,8 @@ make_config! {
     invitation_expiration_hours: u32, false, def, 120;
     /// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
     emergency_access_allowed: bool, true, def, true;
+    /// Allow email change |> Controls whether users can change their email. This setting applies globally to all users.
+    email_change_allowed: bool, true, def, true;
     /// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
     /// The default for new users. If changed, it will be updated during login for existing users.
     password_iterations: i32, true, def, 600_000;
@@ -1241,17 +1243,18 @@ where
     reg!("email/invite_accepted", ".html");
     reg!("email/invite_confirmed", ".html");
     reg!("email/new_device_logged_in", ".html");
+    reg!("email/protected_action", ".html");
     reg!("email/pw_hint_none", ".html");
     reg!("email/pw_hint_some", ".html");
     reg!("email/send_2fa_removed_from_org", ".html");
-    reg!("email/send_single_org_removed_from_org", ".html");
-    reg!("email/send_org_invite", ".html");
     reg!("email/send_emergency_access_invite", ".html");
+    reg!("email/send_org_invite", ".html");
+    reg!("email/send_single_org_removed_from_org", ".html");
+    reg!("email/smtp_test", ".html");
     reg!("email/twofactor_email", ".html");
     reg!("email/verify_email", ".html");
-    reg!("email/welcome", ".html");
     reg!("email/welcome_must_verify", ".html");
-    reg!("email/smtp_test", ".html");
+    reg!("email/welcome", ".html");

     reg!("admin/base");
     reg!("admin/login");
@@ -7,7 +7,6 @@ use diesel::{

 use rocket::{
     http::Status,
-    outcome::IntoOutcome,
     request::{FromRequest, Outcome},
     Request,
 };
@@ -413,8 +412,11 @@ impl<'r> FromRequest<'r> for DbConn {

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         match request.rocket().state::<DbPool>() {
-            Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable),
-            None => Outcome::Failure((Status::InternalServerError, ())),
+            Some(p) => match p.get().await {
+                Ok(dbconn) => Outcome::Success(dbconn),
+                _ => Outcome::Error((Status::ServiceUnavailable, ())),
+            },
+            None => Outcome::Error((Status::InternalServerError, ())),
         }
     }
 }
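
This follows Rocket 0.5 renaming the request guard failure variant from `Outcome::Failure` to `Outcome::Error` (the `IntoOutcome` import drops out along the way). A minimal guard sketch in the new style; the `ApiKey` type and header name are hypothetical, not part of this diff:

use rocket::{
    http::Status,
    request::{FromRequest, Outcome},
    Request,
};

struct ApiKey(String);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ApiKey {
    type Error = ();

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match req.headers().get_one("x-api-key") {
            Some(key) => Outcome::Success(ApiKey(key.to_string())),
            // Before the rename: Outcome::Failure((Status::Unauthorized, ()))
            None => Outcome::Error((Status::Unauthorized, ())),
        }
    }
}

fn main() {}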
@@ -479,21 +481,9 @@ mod postgresql_migrations {
     pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/postgresql");

     pub fn run_migrations() -> Result<(), super::Error> {
-        use diesel::{Connection, RunQueryDsl};
+        use diesel::Connection;
         // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
         let mut connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
-        // Disable Foreign Key Checks during migration
-
-        // FIXME: Per https://www.postgresql.org/docs/12/sql-set-constraints.html,
-        // "SET CONSTRAINTS sets the behavior of constraint checking within the
-        // current transaction", so this setting probably won't take effect for
-        // any of the migrations since it's being run outside of a transaction.
-        // Migrations that need to disable foreign key checks should run this
-        // from within the migration script itself.
-        diesel::sql_query("SET CONSTRAINTS ALL DEFERRED")
-            .execute(&mut connection)
-            .expect("Failed to disable Foreign Key Checks during migrations");
-
         connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
         Ok(())
     }
@@ -20,9 +20,9 @@ db_object! {
         pub access_code: String,
         pub public_key: String,

-        pub enc_key: String,
+        pub enc_key: Option<String>,

-        pub master_password_hash: String,
+        pub master_password_hash: Option<String>,
         pub approved: Option<bool>,
         pub creation_date: NaiveDateTime,
         pub response_date: Option<NaiveDateTime>,
@@ -53,8 +53,8 @@ impl AuthRequest {
             response_device_id: None,
             access_code,
             public_key,
-            enc_key: String::new(),
-            master_password_hash: String::new(),
+            enc_key: None,
+            master_password_hash: None,
             approved: None,
             creation_date: now,
             response_date: None,
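
Turning `enc_key` and `master_password_hash` into `Option<String>` matches the schema change further down (`Text` to `Nullable<Text>`): in Diesel, a nullable column maps to an `Option` on the Rust side. A reduced sketch under that assumption (simplified table, not the real generated schema):

use diesel::prelude::*;

diesel::table! {
    auth_requests (uuid) {
        uuid -> Text,
        enc_key -> Nullable<Text>,
        master_password_hash -> Nullable<Text>,
    }
}

#[derive(Queryable)]
struct AuthRequest {
    uuid: String,
    enc_key: Option<String>,              // Nullable<Text> becomes Option<String>
    master_password_hash: Option<String>,
}

fn main() {}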
@@ -23,12 +23,13 @@ db_object! {
         pub user_uuid: Option<String>,
         pub organization_uuid: Option<String>,

+        pub key: Option<String>,
+
         /*
         Login = 1,
         SecureNote = 2,
         Card = 3,
-        Identity = 4,
-        Fido2key = 5
+        Identity = 4
         */
         pub atype: i32,
         pub name: String,
@@ -62,6 +63,8 @@ impl Cipher {
             user_uuid: None,
             organization_uuid: None,

+            key: None,
+
             atype,
             name,

@@ -203,6 +206,7 @@ impl Cipher {
             "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
             "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
             "OrganizationId": self.organization_uuid,
+            "Key": self.key,
             "Attachments": attachments_json,
             // We have UseTotp set to true by default within the Organization model.
             // This variable together with UsersGetPremium is used to show or hide the TOTP counter.
@@ -224,7 +228,6 @@ impl Cipher {
             "SecureNote": null,
             "Card": null,
             "Identity": null,
-            "Fido2Key": null,
         });

         // These values are only needed for user/default syncs
@@ -253,7 +256,6 @@ impl Cipher {
             2 => "SecureNote",
             3 => "Card",
             4 => "Identity",
-            5 => "Fido2Key",
             _ => panic!("Wrong type"),
         };

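The new nullable `key` column appears to hold the per-cipher encryption key that newer Bitwarden clients send when rotating keys; existing rows simply stay `NULL` and keep using the account key.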
@@ -31,6 +31,7 @@ db_object! {
         pub status: i32,
         pub atype: i32,
         pub reset_password_key: Option<String>,
+        pub external_id: Option<String>,
     }

 #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -208,19 +209,37 @@ impl UserOrganization {
             status: UserOrgStatus::Accepted as i32,
             atype: UserOrgType::User as i32,
             reset_password_key: None,
+            external_id: None,
         }
     }

-    pub fn restore(&mut self) {
+    pub fn restore(&mut self) -> bool {
         if self.status < UserOrgStatus::Accepted as i32 {
             self.status += ACTIVATE_REVOKE_DIFF;
+            return true;
         }
+        false
     }

-    pub fn revoke(&mut self) {
+    pub fn revoke(&mut self) -> bool {
         if self.status > UserOrgStatus::Revoked as i32 {
             self.status -= ACTIVATE_REVOKE_DIFF;
+            return true;
         }
+        false
+    }
+
+    pub fn set_external_id(&mut self, external_id: Option<String>) -> bool {
+        // Check if external_id is empty. We don't want to have
+        // empty strings in the database
+        if self.external_id != external_id {
+            self.external_id = match external_id {
+                Some(external_id) if !external_id.is_empty() => Some(external_id),
+                _ => None,
+            };
+            return true;
+        }
+        false
     }
 }

|
|||||||
let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();
|
let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();
|
||||||
|
|
||||||
// Because BitWarden want the status to be -1 for revoked users we need to catch that here.
|
// Because BitWarden want the status to be -1 for revoked users we need to catch that here.
|
||||||
// We subtract/add a number so we can restore/activate the user to it's previouse state again.
|
// We subtract/add a number so we can restore/activate the user to it's previous state again.
|
||||||
let status = if self.status < UserOrgStatus::Revoked as i32 {
|
let status = if self.status < UserOrgStatus::Revoked as i32 {
|
||||||
UserOrgStatus::Revoked as i32
|
UserOrgStatus::Revoked as i32
|
||||||
} else {
|
} else {
|
||||||
@@ -434,7 +453,7 @@ impl UserOrganization {
|
|||||||
"UserId": self.user_uuid,
|
"UserId": self.user_uuid,
|
||||||
"Name": user.name,
|
"Name": user.name,
|
||||||
"Email": user.email,
|
"Email": user.email,
|
||||||
"ExternalId": user.external_id,
|
"ExternalId": self.external_id,
|
||||||
"Groups": groups,
|
"Groups": groups,
|
||||||
"Collections": collections,
|
"Collections": collections,
|
||||||
|
|
||||||
@@ -475,7 +494,7 @@ impl UserOrganization {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Because BitWarden want the status to be -1 for revoked users we need to catch that here.
|
// Because BitWarden want the status to be -1 for revoked users we need to catch that here.
|
||||||
// We subtract/add a number so we can restore/activate the user to it's previouse state again.
|
// We subtract/add a number so we can restore/activate the user to it's previous state again.
|
||||||
let status = if self.status < UserOrgStatus::Revoked as i32 {
|
let status = if self.status < UserOrgStatus::Revoked as i32 {
|
||||||
UserOrgStatus::Revoked as i32
|
UserOrgStatus::Revoked as i32
|
||||||
} else {
|
} else {
|
||||||
@@ -778,6 +797,17 @@ impl UserOrganization {
|
|||||||
.load::<UserOrganizationDb>(conn).expect("Error loading user organizations").from_db()
|
.load::<UserOrganizationDb>(conn).expect("Error loading user organizations").from_db()
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn find_by_external_id_and_org(ext_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||||
|
db_run! {conn: {
|
||||||
|
users_organizations::table
|
||||||
|
.filter(
|
||||||
|
users_organizations::external_id.eq(ext_id)
|
||||||
|
.and(users_organizations::org_uuid.eq(org_uuid))
|
||||||
|
)
|
||||||
|
.first::<UserOrganizationDb>(conn).ok().from_db()
|
||||||
|
}}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl OrganizationApiKey {
|
impl OrganizationApiKey {
|
||||||
|
@@ -34,6 +34,9 @@ pub enum TwoFactorType {
     EmailVerificationChallenge = 1002,
     WebauthnRegisterChallenge = 1003,
     WebauthnLoginChallenge = 1004,
+
+    // Special type for Protected Actions verification via email
+    ProtectedActions = 2000,
 }

 /// Local methods
@@ -51,7 +51,7 @@ db_object! {

         pub avatar_color: Option<String>,

-        pub external_id: Option<String>,
+        pub external_id: Option<String>, // Todo: Needs to be removed in the future, this is not used anymore.
     }

 #[derive(Identifiable, Queryable, Insertable)]
@@ -129,7 +129,7 @@ impl User {

             avatar_color: None,

-            external_id: None,
+            external_id: None, // Todo: Needs to be removed in the future, this is not used anymore.
         }
     }

@@ -154,18 +154,6 @@ impl User {
         matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
     }

-    pub fn set_external_id(&mut self, external_id: Option<String>) {
-        // Check if external_id is empty. We don't want to have
-        // empty strings in the database
-        let mut ext_id: Option<String> = None;
-        if let Some(external_id) = external_id {
-            if !external_id.is_empty() {
-                ext_id = Some(external_id);
-            }
-        }
-        self.external_id = ext_id;
-    }
-
     /// Set the password hash generated
     /// And resets the security_stamp. Based upon the allow_next_route the security_stamp will be different.
     ///
@@ -392,12 +380,6 @@ impl User {
         }}
     }

-    pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
-        db_run! {conn: {
-            users::table.filter(users::external_id.eq(id)).first::<UserDb>(conn).ok().from_db()
-        }}
-    }
-
     pub async fn get_all(conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             users::table.load::<UserDb>(conn).expect("Error loading users").from_db()
@@ -15,6 +15,7 @@ table! {
         updated_at -> Datetime,
         user_uuid -> Nullable<Text>,
         organization_uuid -> Nullable<Text>,
+        key -> Nullable<Text>,
         atype -> Integer,
         name -> Text,
         notes -> Nullable<Text>,
@@ -228,6 +229,7 @@ table! {
         status -> Integer,
         atype -> Integer,
         reset_password_key -> Nullable<Text>,
+        external_id -> Nullable<Text>,
     }
 }

@@ -297,8 +299,8 @@ table! {
         response_device_id -> Nullable<Text>,
         access_code -> Text,
         public_key -> Text,
-        enc_key -> Text,
-        master_password_hash -> Text,
+        enc_key -> Nullable<Text>,
+        master_password_hash -> Nullable<Text>,
         approved -> Nullable<Bool>,
         creation_date -> Timestamp,
         response_date -> Nullable<Timestamp>,
@@ -324,6 +326,7 @@ joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
 joinable!(users_organizations -> organizations (org_uuid));
 joinable!(users_organizations -> users (user_uuid));
+joinable!(users_organizations -> ciphers (org_uuid));
 joinable!(organization_api_key -> organizations (org_uuid));
 joinable!(emergency_access -> users (grantor_uuid));
 joinable!(groups -> organizations (organizations_uuid));
@@ -15,6 +15,7 @@ table! {
         updated_at -> Timestamp,
         user_uuid -> Nullable<Text>,
         organization_uuid -> Nullable<Text>,
+        key -> Nullable<Text>,
         atype -> Integer,
         name -> Text,
         notes -> Nullable<Text>,
@@ -228,6 +229,7 @@ table! {
         status -> Integer,
         atype -> Integer,
         reset_password_key -> Nullable<Text>,
+        external_id -> Nullable<Text>,
     }
 }

@@ -297,8 +299,8 @@ table! {
         response_device_id -> Nullable<Text>,
         access_code -> Text,
         public_key -> Text,
-        enc_key -> Text,
-        master_password_hash -> Text,
+        enc_key -> Nullable<Text>,
+        master_password_hash -> Nullable<Text>,
         approved -> Nullable<Bool>,
         creation_date -> Timestamp,
         response_date -> Nullable<Timestamp>,
@@ -324,6 +326,7 @@ joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
 joinable!(users_organizations -> organizations (org_uuid));
 joinable!(users_organizations -> users (user_uuid));
+joinable!(users_organizations -> ciphers (org_uuid));
 joinable!(organization_api_key -> organizations (org_uuid));
 joinable!(emergency_access -> users (grantor_uuid));
 joinable!(groups -> organizations (organizations_uuid));
@@ -15,6 +15,7 @@ table! {
         updated_at -> Timestamp,
         user_uuid -> Nullable<Text>,
         organization_uuid -> Nullable<Text>,
+        key -> Nullable<Text>,
         atype -> Integer,
         name -> Text,
         notes -> Nullable<Text>,
@@ -228,6 +229,7 @@ table! {
         status -> Integer,
         atype -> Integer,
         reset_password_key -> Nullable<Text>,
+        external_id -> Nullable<Text>,
     }
 }

@@ -297,8 +299,8 @@ table! {
         response_device_id -> Nullable<Text>,
         access_code -> Text,
         public_key -> Text,
-        enc_key -> Text,
-        master_password_hash -> Text,
+        enc_key -> Nullable<Text>,
+        master_password_hash -> Nullable<Text>,
         approved -> Nullable<Bool>,
         creation_date -> Timestamp,
         response_date -> Nullable<Timestamp>,
@@ -291,10 +291,10 @@ macro_rules! err_json {
 macro_rules! err_handler {
     ($expr:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}", $expr);
-        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $expr));
+        return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $expr));
     }};
     ($usr_msg:expr, $log_value:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}. {}", $usr_msg, $log_value);
-        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $usr_msg));
+        return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $usr_msg));
     }};
 }
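
As with the `DbConn` guard earlier, this is mechanical fallout from the same Rocket 0.5 rename of the request `Outcome::Failure` variant to `Outcome::Error`.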
src/mail.rs
@@ -517,6 +517,19 @@ pub async fn send_admin_reset_password(address: &str, user_name: &str, org_name:
     send_email(address, &subject, body_html, body_text).await
 }

+pub async fn send_protected_action_token(address: &str, token: &str) -> EmptyResult {
+    let (subject, body_html, body_text) = get_text(
+        "email/protected_action",
+        json!({
+            "url": CONFIG.domain(),
+            "img_src": CONFIG._smtp_img_src(),
+            "token": token,
+        }),
+    )?;
+
+    send_email(address, &subject, body_html, body_text).await
+}
+
 async fn send_with_selected_transport(email: Message) -> EmptyResult {
     if CONFIG.use_sendmail() {
         match sendmail_transport().send(email).await {
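
The `json!` payload above is what the new `email/protected_action` template receives. A reduced illustration of the handlebars step, assuming the `handlebars` and `serde_json` crates (the template string here is made up, not the shipped one):

use handlebars::Handlebars;
use serde_json::json;

fn main() {
    let hb = Handlebars::new();
    let body = hb
        .render_template(
            "Your verification code is {{token}} ({{url}})",
            &json!({ "url": "https://vault.example.com", "token": "123456" }),
        )
        .unwrap();
    assert_eq!(body, "Your verification code is 123456 (https://vault.example.com)");
}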
src/main.rs
@@ -261,6 +261,13 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
         log::LevelFilter::Off
     };

+    // Only show handlebar logs when the level is Trace
+    let handlebars_level = if level >= log::LevelFilter::Trace {
+        log::LevelFilter::Trace
+    } else {
+        log::LevelFilter::Warn
+    };
+
     let mut logger = fern::Dispatch::new()
         .level(level)
         // Hide unknown certificate errors if using self-signed
@@ -282,6 +289,8 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
         .level_for("rocket::shield::shield", log::LevelFilter::Warn)
         .level_for("hyper::proto", log::LevelFilter::Off)
         .level_for("hyper::client", log::LevelFilter::Off)
+        // Filter handlebars logs
+        .level_for("handlebars::render", handlebars_level)
         // Prevent cookie_store logs
         .level_for("cookie_store", log::LevelFilter::Off)
         // Variable level for trust-dns used by reqwest
@@ -317,7 +326,16 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
     }

     if let Some(log_file) = CONFIG.log_file() {
-        logger = logger.chain(fern::log_file(log_file)?);
+        #[cfg(windows)]
+        {
+            logger = logger.chain(fern::log_file(log_file)?);
+        }
+        #[cfg(not(windows))]
+        {
+            const SIGHUP: i32 = tokio::signal::unix::SignalKind::hangup().as_raw_value();
+            let path = Path::new(&log_file);
+            logger = logger.chain(fern::log_reopen1(path, [SIGHUP])?);
+        }
     }

     #[cfg(not(windows))]
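
On non-Windows targets the log file is now opened through `fern::log_reopen1`, which re-opens the file when one of the listed signals arrives. A rotation tool such as logrotate can therefore send SIGHUP after moving the file aside, and vaultwarden starts writing to the fresh file without a restart.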
Some files were not shown because too many files have changed in this diff.