Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 10:45:57 +03:00)

Compare commits

73 Commits
cec1e87679
512b3b9b7c
93da5091e6
915496c103
ecb31c85d6
d722328f05
cb4b683dcd
6eaf131922
8933ac2ee7
6822e445bb
18fbc1ccf6
4861f6decc
b435ee49ad
193f86e43e
66a7baa67c
18d66474e0
ff8db4fd78
b2f9af718e
198fd2fc1d
ec8a9c82df
ef5e0bd4e5
30b408eaa9
e205e3b7db
ca1a9e26d8
f3a1385aee
008a2cf298
f0c9a7fbc3
9162b13123
480bf9b0c1
f96c5e8a1e
3d4be24902
bf41d74501
01e33a4919
bc26bfa589
ccc51e7580
99a59bc4f3
a77482575a
bbd630f1ee
d18b793c71
d3a1d875d5
d6e0ace192
60cbfa59bf
5ab7010c37
ad2cfd8b97
32543c46da
66bff73ebf
83d5432cbf
f579a4154c
f5a19c5f8b
aa9bc1f785
f162e85e44
33ef70c192
3d2df6ce11
6cdcb3b297
d1af468700
ae1c53f4e5
bc57c4b193
61ae4c9cf5
8d7b3db33d
e9ec3741ae
dacd50f3f1
9412112639
aaeae16983
d892880dd2
4395e8e888
3dbfc484a5
4ec2507073
ab65d7989b
8707728cdb
631d022e17
211f4492fa
61f9081827
a8e5384c4a
@@ -30,6 +30,10 @@
 ## Define the size of the connection pool used for connecting to the database.
 # DATABASE_MAX_CONNS=10
 
+## Database timeout
+## Timeout when acquiring database connection
+# DATABASE_TIMEOUT=30
+
 ## Database connection initialization
 ## Allows SQL statements to be run whenever a new database connection is created.
 ## This is mainly useful for connection-scoped pragmas.
@@ -77,7 +81,7 @@
 # PUSH_INSTALLATION_ID=CHANGEME
 # PUSH_INSTALLATION_KEY=CHANGEME
 ## Don't change this unless you know what you're doing.
-# PUSH_RELAY_BASE_URI=https://push.bitwarden.com
+# PUSH_RELAY_URI=https://push.bitwarden.com
 
 ## Controls whether users are allowed to create Bitwarden Sends.
 ## This setting applies globally to all users.
@@ -93,6 +97,10 @@
 ## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
 # ORG_EVENTS_ENABLED=false
 
+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
+
 ## Number of days to retain events stored in the database.
 ## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
 # EVENTS_DAYS_RETAIN=
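The settings in these hunks map one-to-one onto environment variables. A minimal sketch of a resulting `.env`; values are illustrative, and the connection-init variable name `DATABASE_CONN_INIT` is assumed from the upstream template (the hunk's trailing context ends just before the variable itself):

```bash
# Sketch of a .env exercising the settings above (illustrative values).
DATABASE_MAX_CONNS=10    # size of the database connection pool
DATABASE_TIMEOUT=30      # seconds to wait when acquiring a pooled connection
# Connection-scoped pragmas run on every new SQLite connection
# (variable name assumed from the upstream template):
DATABASE_CONN_INIT="PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
EMAIL_CHANGE_ALLOWED=true
```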
75 .github/workflows/build.yml (vendored)
@@ -8,9 +8,11 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"
   pull_request:
     paths:
       - ".github/workflows/build.yml"
@@ -18,19 +20,20 @@ on:
       - "migrations/**"
       - "Cargo.*"
       - "build.rs"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
       - "rustfmt.toml"
       - "diesel.toml"
+      - "docker/Dockerfile.j2"
+      - "docker/DockerSettings.yaml"
 
 jobs:
   build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
     # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
     env:
       RUSTFLAGS: "-D warnings"
-      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
     strategy:
       fail-fast: false
       matrix:
@@ -43,13 +46,13 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
       # End Checkout the repo
 
 
       # Install dependencies
       - name: "Install dependencies Ubuntu"
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
       # End Install dependencies
 
 
@@ -59,7 +62,7 @@ jobs:
         shell: bash
         run: |
           if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
-            RUST_TOOLCHAIN="$(cat rust-toolchain)"
+            RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
          elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
             RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
           else
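The replaced `cat rust-toolchain` worked because the old file held only a bare version string; `rust-toolchain.toml` is TOML, so the workflow now extracts the quoted `channel` value with a Perl-regex grep. A quick sketch of what that pattern matches (the channel value is illustrative):

```bash
# Given a rust-toolchain.toml such as:
#   [toolchain]
#   channel = "1.73.0"
#   components = [ "rustfmt", "clippy" ]
# \K discards everything matched so far (up to the opening quote) and the
# lookahead (?=") stops before the closing quote, so only the version prints.
grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml
# -> 1.73.0
```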
@@ -71,7 +74,7 @@ jobs:
 
       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -81,17 +84,19 @@ jobs:
 
       # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
+        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
       # End Install the MSRV channel to be used
 
 
-      # Enable Rust Caching
-      - uses: Swatinem/rust-cache@2656b87321093db1cb55fbd73183d195214fdfd1 # v2.5.0
-      # End Enable Rust Caching
-
       # Set the current matrix toolchain version as default
       - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
         run: |
+          # Remove the rust-toolchain.toml
+          rm rust-toolchain.toml
           # Set the default
           rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
 
       # Show environment
       - name: "Show environment"
@@ -100,47 +105,55 @@ jobs:
           cargo -vV
       # End Show environment
 
+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
+        with:
+          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
+          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
+          # Only update when really needed! Use a <year>.<month>[.<inc>] format.
+          prefix-key: "v2023.07-rust"
+      # End Enable Rust Caching
+
-      # Run cargo tests (In release mode to speed up future builds)
+      # Run cargo tests
+      # First test all features together, afterwards test them separately.
       - name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
         id: test_sqlite_mysql_postgresql_mimalloc
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
+          cargo test --features sqlite,mysql,postgresql,enable_mimalloc
 
       - name: "test features: sqlite,mysql,postgresql"
         id: test_sqlite_mysql_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite,mysql,postgresql
+          cargo test --features sqlite,mysql,postgresql
 
       - name: "test features: sqlite"
         id: test_sqlite
         if: $${{ always() }}
         run: |
-          cargo test --release --features sqlite
+          cargo test --features sqlite
 
       - name: "test features: mysql"
         id: test_mysql
         if: $${{ always() }}
         run: |
-          cargo test --release --features mysql
+          cargo test --features mysql
 
       - name: "test features: postgresql"
         id: test_postgresql
         if: $${{ always() }}
         run: |
-          cargo test --release --features postgresql
+          cargo test --features postgresql
       # End Run cargo tests
 
 
-      # Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
+      # Run cargo clippy, and fail on warnings
       - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
         id: clippy
         if: ${{ always() && matrix.channel == 'rust-toolchain' }}
         run: |
-          cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
+          cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
       # End Run cargo clippy
 
 
@@ -182,21 +195,3 @@ jobs:
         run: |
           echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
-
-
-      # Build the binary to upload to the artifacts
-      - name: "build features: sqlite,mysql,postgresql"
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        run: |
-          cargo build --release --features sqlite,mysql,postgresql
-      # End Build the binary
-
-
-      # Upload artifact to Github Actions
-      - name: "Upload artifact"
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          name: vaultwarden
-          path: target/release/vaultwarden
-      # End Upload artifact to Github Actions
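Dropping `--release` means the test and clippy steps now run debug builds, trading test runtime for much faster compiles. The "all features together, then separately" sequence from the comment can be reproduced locally with a small loop (a sketch, not part of the workflow):

```bash
# Mirror the workflow's test order: combined feature set first, then each
# database backend on its own.
for feats in sqlite,mysql,postgresql,enable_mimalloc sqlite,mysql,postgresql sqlite mysql postgresql; do
    cargo test --features "$feats" || break
done
cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
```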
7 .github/workflows/hadolint.yml (vendored)
@@ -8,15 +8,14 @@ on: [
 jobs:
   hadolint:
     name: Validate Dockerfile syntax
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 30
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
       # End Checkout the repo
 
-
       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
@@ -30,5 +29,5 @@ jobs:
       # Test Dockerfiles
       - name: Run hadolint
         shell: bash
-        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
+        run: hadolint docker/Dockerfile.{debian,alpine}
       # End Test Dockerfiles
219 .github/workflows/release.yml (vendored)
@@ -6,15 +6,15 @@ on:
       - ".github/workflows/release.yml"
       - "src/**"
       - "migrations/**"
-      - "hooks/**"
       - "docker/**"
       - "Cargo.*"
       - "build.rs"
       - "diesel.toml"
-      - "rust-toolchain"
+      - "rust-toolchain.toml"
 
     branches: # Only on paths above
       - main
+      - release-build-revision
 
     tags: # Always, regardless of paths above
       - '*'
@@ -24,7 +24,7 @@ jobs:
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
@@ -35,23 +35,20 @@ jobs:
         with:
           cancel_others: 'true'
         # Only run this when not creating a tag
-        if: ${{ startsWith(github.ref, 'refs/heads/') }}
+        if: ${{ github.ref_type == 'branch' }}
 
   docker-build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 120
     needs: skip_check
-    # Start a local docker registry to be used to generate multi-arch images.
-    services:
-      registry:
-        image: registry:2
-        ports:
-          - 5000:5000
+    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
+    # TODO: Start a local docker registry to be used to extract the final Alpine static build images
+    # services:
+    #   registry:
+    #     image: registry:2
+    #     ports:
+    #       - 5000:5000
     env:
-      # Use BuildKit (https://docs.docker.com/build/buildkit/) for better
-      # build performance and the ability to copy extended file attributes
-      # (e.g., for executable capabilities) across build phases.
-      DOCKER_BUILDKIT: 1
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
       # The *_REPO variables need to be configured as repository variables
@@ -65,7 +62,6 @@ jobs:
       # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
       # Check for Quay.io credentials in secrets
       HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
-    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
     strategy:
       matrix:
         base_image: ["debian","alpine"]
@@ -73,163 +69,102 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
         with:
           fetch-depth: 0
 
-      # Determine Docker Tag
-      - name: Init Variables
-        id: vars
+      - name: Initialize QEMU binfmt support
+        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
+        with:
+          platforms: "arm64,arm"
+
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+        with:
+          config-inline: |
+            [worker.oci]
+              max-parallelism = 2
+          driver-opts: |
+            network=host
+
+      # Determine Base Tags and Source Version
+      - name: Determine Base Tags and Source Version
         shell: bash
         run: |
-          # Check which main tag we are going to build determined by github.ref
-          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
-            echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
-          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
-            echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
           fi
-      # End Determine Docker Tag
+
+          # Get the Source Version for this release
+          GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
+          if [[ -n "${GIT_EXACT_TAG}" ]]; then
+            echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}"
+          else
+            GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+            echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}"
+          fi
+      # End Determine Base Tags
 
       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
 
+      - name: Add registry for DockerHub
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
 
+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
+
       # Login to Quay.io
       - name: Login to Quay.io
-        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
         with:
           registry: quay.io
           username: ${{ secrets.QUAY_USERNAME }}
           password: ${{ secrets.QUAY_TOKEN }}
         if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
 
-      # Debian
-
-      # Docker Hub
-      - name: Build Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Debian based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Debian based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Debian based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      # Alpine
-
-      # Docker Hub
-      - name: Build Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (docker.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-
-      # GitHub Container Registry
-      - name: Build Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (ghcr.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.GHCR_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
-
-      # Quay.io
-      - name: Build Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/build
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
-
-      - name: Push Alpine based images (quay.io)
-        shell: bash
-        env:
-          DOCKER_REPO: "${{ vars.QUAY_REPO }}"
-          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
-        run: |
-          ./hooks/push
-        if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
+      - name: Add registry for Quay.io
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
+
+      - name: Bake ${{ matrix.base_image }} containers
+        uses: docker/bake-action@511fde2517761e303af548ec9e0ea74a8a100112 # v4.0.0
+        env:
+          BASE_TAGS: "${{ env.BASE_TAGS }}"
+          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
+          SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
+          SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
+          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
+        with:
+          pull: true
+          push: true
+          files: docker/docker-bake.hcl
+          targets: "${{ matrix.base_image }}-multi"
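The single bake step replaces all twelve hooks/build and hooks/push steps: `docker-bake.hcl` reads the exported variables and fans out over the configured platforms itself. A rough local equivalent (a sketch; the registry value is a placeholder):

```bash
# Export the variables the bake file expects, then bake one target.
export BASE_TAGS="testing"
export SOURCE_COMMIT="$(git rev-parse HEAD)"
export SOURCE_VERSION="$(git describe --tags --abbrev=0)-${SOURCE_COMMIT:0:8}"
export SOURCE_REPOSITORY_URL="https://github.com/dani-garcia/vaultwarden"
export CONTAINER_REGISTRIES="docker.io/example/vaultwarden"  # placeholder repo
docker buildx bake --file docker/docker-bake.hcl --pull --push debian-multi
```

The `debian-multi` / `alpine-multi` target names follow the workflow's `${{ matrix.base_image }}-multi` pattern.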
43 .github/workflows/trivy.yml (vendored, new file)
@@ -0,0 +1,43 @@
+name: trivy
+
+on:
+  push:
+    branches:
+      - main
+      - release-build-revision
+    tags:
+      - '*'
+  pull_request:
+    branches: [ "main" ]
+  schedule:
+    - cron: '00 12 * * *'
+
+permissions:
+  contents: read
+
+jobs:
+  trivy-scan:
+    name: Check
+    runs-on: ubuntu-22.04
+    timeout-minutes: 30
+    permissions:
+      contents: read
+      security-events: write
+      actions: read
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+
+      - name: Run Trivy vulnerability scanner
+        uses: aquasecurity/trivy-action@f78e9ecf42a1271402d4f484518b9313235990e1 # v0.13.1
+        with:
+          scan-type: repo
+          ignore-unfixed: true
+          format: sarif
+          output: trivy-results.sarif
+          severity: CRITICAL,HIGH
+
+      - name: Upload Trivy scan results to GitHub Security tab
+        uses: github/codeql-action/upload-sarif@bad341350a2f5616f9e048e51360cedc49181ce8 # v2.22.4
+        with:
+          sarif_file: 'trivy-results.sarif'
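The same scan can be run against a local checkout before pushing (a sketch, assuming a trivy CLI recent enough to support `repo` scans of a local path):

```bash
# Mirrors the workflow's scan-type/severity/format settings.
trivy repo . \
  --ignore-unfixed \
  --severity CRITICAL,HIGH \
  --format sarif \
  --output trivy-results.sarif
```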
@@ -1,10 +1,12 @@
 ignored:
+  # To prevent issues and make clear some images only work on linux/amd64, we ignore this
+  - DL3029
   # disable explicit version for apt install
   - DL3008
   # disable explicit version for apk install
   - DL3018
   # disable check for consecutive `RUN` instructions
   - DL3059
   # Ignore shellcheck info message
   - SC1091
 trustedRegistries:
   - docker.io
   - ghcr.io
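hadolint reads this config automatically from the working directory, so a local run matching the updated workflow step is simply:

```bash
# DL3029, DL3008, DL3018, DL3059 and SC1091 above are skipped automatically.
hadolint docker/Dockerfile.debian docker/Dockerfile.alpine
```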
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.5.0
     hooks:
       - id: check-yaml
       - id: check-json
1101 Cargo.lock (generated)
File diff suppressed because it is too large
78 Cargo.toml
@@ -3,7 +3,7 @@ name = "vaultwarden"
|
||||
version = "1.0.0"
|
||||
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.68.2"
|
||||
rust-version = "1.71.1"
|
||||
resolver = "2"
|
||||
|
||||
repository = "https://github.com/dani-garcia/vaultwarden"
|
||||
@@ -40,9 +40,9 @@ syslog = "6.1.0"
|
||||
|
||||
[dependencies]
|
||||
# Logging
|
||||
log = "0.4.19"
|
||||
fern = { version = "0.6.2", features = ["syslog-6"] }
|
||||
tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
||||
log = "0.4.20"
|
||||
fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
|
||||
tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
||||
|
||||
# A `dotenv` implementation for Rust
|
||||
dotenvy = { version = "0.15.7", default-features = false }
|
||||
@@ -51,8 +51,8 @@ dotenvy = { version = "0.15.7", default-features = false }
|
||||
once_cell = "1.18.0"
|
||||
|
||||
# Numerical libraries
|
||||
num-traits = "0.2.15"
|
||||
num-derive = "0.4.0"
|
||||
num-traits = "0.2.17"
|
||||
num-derive = "0.4.1"
|
||||
|
||||
# Web framework
|
||||
rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
|
||||
@@ -61,21 +61,21 @@ rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46
|
||||
|
||||
# WebSockets libraries
|
||||
tokio-tungstenite = "0.19.0"
|
||||
rmpv = "1.0.0" # MessagePack library
|
||||
rmpv = "1.0.1" # MessagePack library
|
||||
|
||||
# Concurrent HashMap used for WebSocket messaging and favicons
|
||||
dashmap = "5.4.0"
|
||||
dashmap = "5.5.3"
|
||||
|
||||
# Async futures
|
||||
futures = "0.3.28"
|
||||
tokio = { version = "1.29.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
|
||||
tokio = { version = "1.33.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
|
||||
|
||||
# A generic serialization/deserialization framework
|
||||
serde = { version = "1.0.166", features = ["derive"] }
|
||||
serde_json = "1.0.99"
|
||||
serde = { version = "1.0.189", features = ["derive"] }
|
||||
serde_json = "1.0.107"
|
||||
|
||||
# A safe, extensible ORM and Query builder
|
||||
diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
|
||||
diesel = { version = "2.1.3", features = ["chrono", "r2d2"] }
|
||||
diesel_migrations = "2.1.0"
|
||||
diesel_logger = { version = "0.3.0", optional = true }
|
||||
|
||||
@@ -84,15 +84,15 @@ libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
|
||||
|
||||
# Crypto-related libraries
|
||||
rand = { version = "0.8.5", features = ["small_rng"] }
|
||||
ring = "0.16.20"
|
||||
ring = "0.17.5"
|
||||
|
||||
# UUID generation
|
||||
uuid = { version = "1.4.0", features = ["v4"] }
|
||||
uuid = { version = "1.5.0", features = ["v4"] }
|
||||
|
||||
# Date and time libraries
|
||||
chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
|
||||
chrono = { version = "0.4.31", features = ["clock", "serde"], default-features = false }
|
||||
chrono-tz = "0.8.3"
|
||||
time = "0.3.22"
|
||||
time = "0.3.30"
|
||||
|
||||
# Job scheduler
|
||||
job_scheduler_ng = "2.0.4"
|
||||
@@ -101,7 +101,7 @@ job_scheduler_ng = "2.0.4"
|
||||
data-encoding = "2.4.0"
|
||||
|
||||
# JWT library
|
||||
jsonwebtoken = "8.3.0"
|
||||
jsonwebtoken = "9.0.0"
|
||||
|
||||
# TOTP library
|
||||
totp-lite = "2.0.0"
|
||||
@@ -113,71 +113,77 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
|
||||
webauthn-rs = "0.3.2"
|
||||
|
||||
# Handling of URL's for WebAuthn and favicons
|
||||
url = "2.4.0"
|
||||
url = "2.4.1"
|
||||
|
||||
# Email libraries
|
||||
lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
||||
lettre = { version = "0.11.0", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
||||
percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
|
||||
email_address = "0.2.4"
|
||||
|
||||
# HTML Template library
|
||||
handlebars = { version = "4.3.7", features = ["dir_source"] }
|
||||
handlebars = { version = "4.4.0", features = ["dir_source"] }
|
||||
|
||||
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
||||
reqwest = { version = "0.11.18", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
|
||||
reqwest = { version = "0.11.22", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
|
||||
|
||||
# Favicon extraction libraries
|
||||
html5gum = "0.5.3"
|
||||
regex = { version = "1.8.4", features = ["std", "perf", "unicode-perl"], default-features = false }
|
||||
html5gum = "0.5.7"
|
||||
regex = { version = "1.10.2", features = ["std", "perf", "unicode-perl"], default-features = false }
|
||||
data-url = "0.3.0"
|
||||
bytes = "1.4.0"
|
||||
bytes = "1.5.0"
|
||||
|
||||
# Cache function results (Used for version check and favicon fetching)
|
||||
cached = "0.44.0"
|
||||
cached = { version = "0.46.0", features = ["async"] }
|
||||
|
||||
# Used for custom short lived cookie jar during favicon extraction
|
||||
cookie = "0.16.2"
|
||||
cookie_store = "0.19.1"
|
||||
|
||||
# Used by U2F, JWT and PostgreSQL
|
||||
openssl = "0.10.55"
|
||||
openssl = "0.10.57"
|
||||
# Set openssl-sys fixed to v0.9.92 to prevent building issues with musl, arm and 32bit pointer width
|
||||
# It will force add a dynamically linked library which prevents the build from being static
|
||||
openssl-sys = "=0.9.92"
|
||||
|
||||
# CLI argument parsing
|
||||
pico-args = "0.5.0"
|
||||
|
||||
# Macro ident concatenation
|
||||
paste = "1.0.13"
|
||||
governor = "0.5.1"
|
||||
paste = "1.0.14"
|
||||
governor = "0.6.0"
|
||||
|
||||
# Check client versions for specific features.
|
||||
semver = "1.0.17"
|
||||
semver = "1.0.20"
|
||||
|
||||
# Allow overriding the default memory allocator
|
||||
# Mainly used for the musl builds, since the default musl malloc is very slow
|
||||
mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
|
||||
which = "4.4.0"
|
||||
mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
|
||||
which = "5.0.0"
|
||||
|
||||
# Argon2 library with support for the PHC format
|
||||
argon2 = "0.5.0"
|
||||
argon2 = "0.5.2"
|
||||
|
||||
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
|
||||
rpassword = "7.2.0"
|
||||
|
||||
|
||||
[patch.crates-io]
|
||||
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
|
||||
# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
|
||||
|
||||
|
||||
# Strip debuginfo from the release builds
|
||||
# Also enable thin LTO for some optimizations
|
||||
[profile.release]
|
||||
strip = "debuginfo"
|
||||
lto = "thin"
|
||||
|
||||
# Always build argon2 using opt-level 3
|
||||
# This is a huge speed improvement during testing
|
||||
[profile.dev.package.argon2]
|
||||
opt-level = 3
|
||||
|
||||
# A little bit of a speedup
|
||||
[profile.dev]
|
||||
split-debuginfo = "unpacked"
|
||||
|
||||
# Always build argon2 using opt-level 3
|
||||
# This is a huge speed improvement during testing
|
||||
[profile.dev.package.argon2]
|
||||
opt-level = 3
|
||||
|
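One way to sanity-check the bumped `rust-version` locally is to build with exactly the pinned MSRV (a sketch):

```bash
# Install the pinned minimum toolchain and check with the same database
# features the CI matrix exercises.
rustup toolchain install 1.71.1
cargo +1.71.1 check --features sqlite,mysql,postgresql
```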
@@ -1 +1 @@
-docker/amd64/Dockerfile
+docker/Dockerfile.debian
@@ -42,7 +42,7 @@ docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p
 ```
 This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
 
-**IMPORTANT**: Most modern web browsers, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
+**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
 
 This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
 
2 build.rs
@@ -18,7 +18,7 @@ fn main() {
     );
 
     #[cfg(all(not(debug_assertions), feature = "query_logger"))]
-    compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
+    compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");
 
     // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
     // If neither exist, read from git.
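Per the surrounding comments, the build script takes the version from `$VW_VERSION`, falls back to the legacy `$BWRS_VERSION`, and otherwise asks git, so a version can be pinned at build time without a tag (a sketch):

```bash
# Override the embedded version string instead of relying on `git describe`:
VW_VERSION=1.30.0 cargo build --release --features sqlite
```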
28 docker/DockerSettings.yaml (new file)
@@ -0,0 +1,28 @@
+---
+vault_version: "v2023.10.0"
+vault_image_digest: "sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935"
+# Cross Compile Docker Helper Scripts v1.3.0
+# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
+xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
+rust_version: 1.73.0 # Rust version to be used
+debian_version: bookworm # Debian release name to be used
+alpine_version: 3.18 # Alpine version to be used
+# For which platforms/architectures will we try to build images
+platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
+# Determine the build images per OS/Arch
+build_stage_image:
+  debian:
+    image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}"
+    platform: "$BUILDPLATFORM"
+  alpine:
+    image: "build_${TARGETARCH}${TARGETVARIANT}"
+    platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
+    arch_image:
+      amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
+      arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"
+      armv7: "ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}"
+      armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}"
+# The final image which will be used to distribute the container images
+runtime_stage_image:
+  debian: "docker.io/library/debian:{{debian_version}}-slim"
+  alpine: "docker.io/library/alpine:{{alpine_version}}"
160 docker/Dockerfile.alpine (new file)
@@ -0,0 +1,160 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
+# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
+
+# Using multistage build:
+#   https://docs.docker.com/develop/develop-images/multistage-build/
+#   https://whitfin.io/speeding-up-rust-docker-builds/
+
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
+#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
+#     [docker.io/vaultwarden/web-vault:v2023.10.0]
+#
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
+
+########################## ALPINE BUILD IMAGES ##########################
+## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
+## And for Alpine we define all build images here, they will only be loaded when actually used
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.73.0 as build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.73.0 as build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.73.0 as build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.73.0 as build_armv6
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root" \
+    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
+    # Debian Bookworm already contains libpq v15
+    PQ_LIB_DIR="/usr/local/musl/pq15/lib"
+
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Shared variables across Debian and Alpine
+RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
+    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
+    # Output the current contents of the file
+    cat /env-cargo
+
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+
+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+ARG CARGO_PROFILE=release
+ARG VW_VERSION
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain.toml ./rust-toolchain.toml
+COPY ./build.rs ./build.rs
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Builds again, this time it will be the actual source files being build
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    touch src/main.rs && \
+    # Create a symlink to the binary target folder to easy copy the binary in the final stage
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi
+
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.18
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    SSL_CERT_DIR=/etc/ssl/certs
+
+# Create data folder and Install needed libraries
+RUN mkdir /data && \
+    apk --no-cache add \
+        ca-certificates \
+        curl \
+        openssl \
+        tzdata
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+CMD ["/start.sh"]
@@ -1,34 +0,0 @@
-# syntax=docker/dockerfile:1
-# The cross-built images have the build arch (`amd64`) embedded in the image
-# manifest, rather than the target arch. For example:
-#
-# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
-# amd64
-#
-# Recent versions of Docker have started printing a warning when the image's
-# claimed arch doesn't match the host arch. For example:
-#
-# WARNING: The requested image's platform (linux/amd64) does not match the
-# detected host platform (linux/arm/v7) and no specific platform was requested
-#
-# The image still works fine, but the spurious warning creates confusion.
-#
-# Docker doesn't seem to provide a way to directly set the arch of an image
-# at build time. To resolve the build vs. target arch discrepancy, we use
-# Docker Buildx to build a new set of images with the correct target arch.
-#
-# Docker Buildx uses this Dockerfile to build an image for each requested
-# platform. Since the Dockerfile basically consists of a single `FROM`
-# instruction, we're effectively telling Buildx to build a platform-specific
-# image by simply copying the existing cross-built image and setting the
-# correct target arch as a side effect.
-#
-# References:
-#
-# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
-# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
-# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
-#
-ARG LOCAL_REPO
-ARG DOCKER_TAG
-FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
194 docker/Dockerfile.debian (new file)
@@ -0,0 +1,194 @@
+# syntax=docker/dockerfile:1
+
+# This file was generated using a Jinja2 template.
+# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
+# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
+
+# Using multistage build:
+#   https://docs.docker.com/develop/develop-images/multistage-build/
+#   https://whitfin.io/speeding-up-rust-docker-builds/
+
+####################### VAULT BUILD IMAGE #######################
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
+#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
+#     [docker.io/vaultwarden/web-vault:v2023.10.0]
+#
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
+
+########################## Cross Compile Docker Helper Scripts ##########################
+## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
+## And these bash scripts do not have any significant difference if at all
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
+
+########################## BUILD IMAGE ##########################
+# hadolint ignore=DL3006
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.73.0-slim-bookworm as build
+COPY --from=xx / /
+ARG TARGETARCH
+ARG TARGETVARIANT
+ARG TARGETPLATFORM
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive \
+    LANG=C.UTF-8 \
+    TZ=UTC \
+    TERM=xterm-256color \
+    CARGO_HOME="/root/.cargo" \
+    USER="root"
+
+# Install clang to get `xx-cargo` working
+# Install pkg-config to allow amd64 builds to find all libraries
+# Install git so build.rs can determine the correct version
+# Install the libc cross packages based upon the debian-arch
+RUN apt-get update && \
+    apt-get install -y \
+        --no-install-recommends \
+        clang \
+        pkg-config \
+        git \
+        "libc6-$(xx-info debian-arch)-cross" \
+        "libc6-dev-$(xx-info debian-arch)-cross" \
+        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
+
+RUN xx-apt-get install -y \
+        --no-install-recommends \
+        gcc \
+        libmariadb3 \
+        libpq-dev \
+        libpq5 \
+        libssl-dev && \
+    # Force install arch dependend mariadb dev packages
+    # Installing them the normal way breaks several other packages (again)
+    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
+    dpkg --force-all -i ./libmariadb-dev*.deb
+
+# Create CARGO_HOME folder and don't download rust docs
+RUN mkdir -pv "${CARGO_HOME}" \
+    && rustup set profile minimal
+
+# Creates a dummy project used to grab dependencies
+RUN USER=root cargo new --bin /app
+WORKDIR /app
+
+# Environment variables for cargo across Debian and Alpine
+RUN source /env-cargo && \
+    if xx-info is-cross ; then \
+        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
+        # Because of this we generate the needed environment variables here which we can load in the needed steps.
+        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
+        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
+        echo "export CROSS_COMPILE=1" >> /env-cargo && \
+        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
+        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
+    fi && \
+    # Output the current contents of the file
+    cat /env-cargo
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
+
+RUN source /env-cargo && \
+    rustup target add "${CARGO_TARGET}"
+
+ARG CARGO_PROFILE=release
+ARG VW_VERSION
+
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./
+COPY ./rust-toolchain.toml ./rust-toolchain.toml
+COPY ./build.rs ./build.rs
+
+# Builds your dependencies and removes the
+# dummy project, except the target folder
+# This folder contains the compiled dependencies
+RUN source /env-cargo && \
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    find . -not -path "./target*" -delete
+
+# Copies the complete project
+# To avoid copying unneeded files, use .dockerignore
+COPY . .
+
+# Builds again, this time it will be the actual source files being build
+RUN source /env-cargo && \
+    # Make sure that we actually build the project by updating the src/main.rs timestamp
+    touch src/main.rs && \
+    # Create a symlink to the binary target folder to easy copy the binary in the final stage
+    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
+    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
+        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
+    else \
+        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
+    fi
+
+
+######################## RUNTIME IMAGE ########################
+# Create a new stage with a minimal image
+# because we already have a binary built
+#
+# To build these images you need to have qemu binfmt support.
+# See the following pages to help install these tools locally
+# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
+# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
+#
+# Or use a Docker image which modifies your host system to support this.
+# The GitHub Actions Workflow uses the same image as used below.
+# See: https://github.com/tonistiigi/binfmt
+# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
+# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
+#
+# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
+FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim
+
+ENV ROCKET_PROFILE="release" \
+    ROCKET_ADDRESS=0.0.0.0 \
+    ROCKET_PORT=80 \
+    DEBIAN_FRONTEND=noninteractive
+
+# Create data folder and Install needed libraries
+RUN mkdir /data && \
+    apt-get update && apt-get install -y \
+        --no-install-recommends \
+        ca-certificates \
+        curl \
+        libmariadb-dev-compat \
+        libpq5 \
+        openssl && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+VOLUME /data
+EXPOSE 80
+EXPOSE 3012
+
+# Copies the files from the context (Rocket.toml file and web-vault)
+# and the binary from the "build" stage to the current stage
+WORKDIR /
+
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
+
+COPY --from=vault /web-vault ./web-vault
+COPY --from=build /app/target/final/vaultwarden .
+
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
+
+CMD ["/start.sh"]
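The `tr` pipelines in the `/env-cargo` step derive cargo's per-target environment variable names from the target triple; for example, for an arm64 cross build where `CARGO_TARGET=aarch64-unknown-linux-gnu`:

```bash
# Uppercase the triple and replace dashes with underscores:
echo "CARGO_TARGET_$(echo "aarch64-unknown-linux-gnu" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER"
# -> CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER
```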
@@ -1,68 +1,14 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
{% set rust_version = "1.70.0" %}
{% set debian_version = "bullseye" %}
{% set alpine_version = "3.17" %}
{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s" % rust_version %}
{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
{% elif "armv7" in target_file %}
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s" % rust_version %}
{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% elif "armv6" in target_file %}
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s" % rust_version %}
{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
{% elif "arm64" in target_file %}
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s" % rust_version %}
{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
{% endif %}
{% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
{% elif "arm64" in target_file %}
{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
{% set package_arch_name = "arm64" %}
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
{% set package_cross_compiler = "aarch64-linux-gnu" %}
{% elif "armv6" in target_file %}
{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
{% set package_arch_name = "armel" %}
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
{% set package_cross_compiler = "arm-linux-gnueabi" %}
{% elif "armv7" in target_file %}
{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
{% set package_arch_name = "armhf" %}
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
{% endif %}
{% if package_arch_name is defined %}
{% set package_arch_prefix = ":" + package_arch_name %}
{% else %}
{% set package_arch_prefix = "" %}
{% endif %}
{% if package_arch_target is defined %}
{% set package_arch_target_param = " --target=" + package_arch_target %}
{% else %}
{% set package_arch_target_param = "" %}
{% endif %}
{% if "buildkit" in target_file %}
{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
{% else %}
{% set mount_rust_cache = "" %}
{% endif %}
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
# This will generate the two Dockerfiles, `Dockerfile.debian` and `Dockerfile.alpine`

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_version = "v2023.5.0" %}
{% set vault_image_digest = "sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085" %}

####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
@@ -80,10 +26,33 @@
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
#
FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault

########################## BUILD IMAGE ##########################
FROM {{ build_stage_base_image }} as build
{% if base == "debian" %}
########################## Cross Compile Docker Helper Scripts ##########################
## We use linux/amd64 no matter which build platform, since these are all bash scripts
## and these bash scripts do not have any significant differences, if any at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
{% elif base == "alpine" %}
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
{% for arch in build_stage_image[base].arch_image %}
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
{% endfor %}
{% endif %}

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
{% if base == "debian" %}
COPY --from=xx / /
{% endif %}
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -92,134 +61,161 @@ ENV DEBIAN_FRONTEND=noninteractive \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"
{%- if base == "alpine" %} \
    # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
    # Debian Bookworm already contains libpq v15
    PQ_LIB_DIR="/usr/local/musl/pq15/lib"
{% endif %}

{% if base == "debian" %}

# Install clang to get `xx-cargo` working
# Install pkg-config to allow amd64 builds to find all libraries
# Install git so build.rs can determine the correct version
# Install the libc cross packages based upon the debian-arch
RUN apt-get update && \
    apt-get install -y \
        --no-install-recommends \
        clang \
        pkg-config \
        git \
        "libc6-$(xx-info debian-arch)-cross" \
        "libc6-dev-$(xx-info debian-arch)-cross" \
        "linux-libc-dev-$(xx-info debian-arch)-cross" && \
    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
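# As an illustration: on a linux/arm64 build, `xx-cargo --print-target-triple`
# resolves to aarch64-unknown-linux-gnu, so /env-cargo starts with
#   export CARGO_TARGET=aarch64-unknown-linux-gnu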

RUN xx-apt-get install -y \
        --no-install-recommends \
        gcc \
        libmariadb3 \
        libpq-dev \
        libpq5 \
        libssl-dev && \
    # Force install arch-dependent mariadb dev packages
    # Installing them the normal way breaks several other packages (again)
    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
    dpkg --force-all -i ./libmariadb-dev*.deb
{% endif %}

# Create CARGO_HOME folder and don't download rust docs
RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

{% if "alpine" in target_file %}
{% if "armv6" in target_file %}
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
{% endif %}
{% elif "arm" in target_file %}
# Install build dependencies for the {{ package_arch_name }} architecture
RUN dpkg --add-architecture {{ package_arch_name }} \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-{{ package_cross_compiler }} \
        libc6-dev{{ package_arch_prefix }} \
        libmariadb-dev{{ package_arch_prefix }} \
        libmariadb-dev-compat{{ package_arch_prefix }} \
        libmariadb3{{ package_arch_prefix }} \
        libpq-dev{{ package_arch_prefix }} \
        libpq5{{ package_arch_prefix }} \
        libssl-dev{{ package_arch_prefix }} \
    #
    # Make sure cargo has the right target config
    && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"

# Set arm-specific environment values
ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
    OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% elif "amd64" in target_file %}
# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev
{% endif %}

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

{% if package_arch_target is defined %}
RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
{% endif %}
{% if base == "debian" %}
# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
    if xx-info is-cross ; then \
        # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
        # Because of this we generate the needed environment variables here which we can load in the needed steps.
        echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
        echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
        echo "export CROSS_COMPILE=1" >> /env-cargo && \
        echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
        echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
    fi && \
    # Output the current contents of the file
    cat /env-cargo
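# As an illustration: for a linux/arm64 cross build the echo lines above produce
#   export CC_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-gcc
#   export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
#   export PKG_CONFIG=/usr/bin/aarch64-linux-gnu-pkg-config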

# Configure the DB ARG as late as possible to not invalidate the cached layers above
{% if "alpine" in target_file %}
ARG DB=sqlite,mysql,postgresql
{% elif base == "alpine" %}
# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
    # Output the current contents of the file
    cat /env-cargo

# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% else %}
ARG DB=sqlite,mysql,postgresql
{% endif %}

RUN source /env-cargo && \
    rustup target add "${CARGO_TARGET}"

ARG CARGO_PROFILE=release
ARG VW_VERSION

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
    && find . -not -path "./target*" -delete
RUN source /env-cargo && \
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
    # Make sure that we actually build the project by updating the src/main.rs timestamp
    touch src/main.rs && \
    # Create a symlink to the binary target folder to easily copy the binary in the final stage
    cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
    if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
        ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
    else \
        ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
    fi

# Builds again, this time it'll just be
# your actual source files being built
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM {{ runtime_stage_base_image }}
#
# To build these images you need to have qemu binfmt support.
# See the following pages to help install these tools locally
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
#
# Or use a Docker image which modifies your host system to support this.
# The GitHub Actions Workflow uses the same image as used below.
# See: https://github.com/tonistiigi/binfmt
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80
{%- if "alpine" in runtime_stage_base_image %} \
{%- if base == "debian" %} \
    DEBIAN_FRONTEND=noninteractive
{% elif base == "alpine" %} \
    SSL_CERT_DIR=/etc/ssl/certs
{% endif %}


{% if "amd64" not in target_file %}
RUN [ "cross-build-start" ]
{% endif %}

# Create data folder and Install needed libraries
RUN mkdir /data \
{% if "alpine" in runtime_stage_base_image %}
    && apk add --no-cache \
RUN mkdir /data && \
{% if base == "debian" %}
    apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
{% elif base == "alpine" %}
    apk --no-cache add \
        ca-certificates \
        curl \
        openssl \
        tzdata
{% else %}
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
{% endif %}

{% if "armv6" in target_file and "alpine" not in target_file %}
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

{% endif -%}

{% if "amd64" not in target_file %}
RUN [ "cross-build-end" ]
{% endif %}

VOLUME /data
@@ -229,16 +225,13 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
{% if package_arch_target is defined %}
COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
{% else %}
COPY --from=build /app/target/release/vaultwarden .
{% endif %}

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,15 +1,4 @@
OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')

all: $(OBJECTS)

%/Dockerfile: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.alpine: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.buildkit: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
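
# Running plain `make` now just regenerates Dockerfile.debian and Dockerfile.alpine from Dockerfile.j2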
all:
	./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
	./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine

.PHONY: all
184 docker/README.md
@@ -1,3 +1,183 @@
The arch-specific directory names follow the arch identifiers used by the Docker official images:
# Vaultwarden Container Building

https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64
To build and release new testing and stable versions of Vaultwarden we use `docker buildx bake`.<br>
This can be used locally by running the command yourself, but it is also used by GitHub Actions.

This makes it easier for us to test and maintain the different architectures we provide.<br>
We also have just two Dockerfiles: one for Debian and one for Alpine based images.<br>
With just these two files we can build both Debian and Alpine images for the following platforms:
- amd64 (linux/amd64)
- arm64 (linux/arm64)
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)

To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different than your host.<br>
This ensures the container build process can run binaries from other architectures.<br>

**NOTE**: Run all the examples below from the root of the repo.<br>


## How to install QEMU binfmt support

This is different per host OS, but most support it in some way.<br>

### Ubuntu/Debian
```bash
apt install binfmt-support qemu-user-static
```

### Arch Linux (and others based upon it)
```bash
pacman -S qemu-user-static qemu-user-static-binfmt
```

### Fedora
```bash
dnf install qemu-user-static
```

### Others
There is also an option to use another Docker container to provide this support.
```bash
# To install and activate
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To uninstall
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
```
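
Whichever route you take, you can verify that the emulators were registered. This is a generic Linux check, not something provided by this repo:
```bash
# qemu-aarch64, qemu-arm, etc. should be listed once binfmt support is active
ls /proc/sys/fs/binfmt_misc/ | grep qemu
```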


## Single architecture container building

You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>

```bash
# The default bake triggers a Debian build using the host's architecture
docker buildx bake --file docker/docker-bake.hcl

# Bake Debian ARM64 using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl debian-arm64

# Bake Alpine ARMv6 as a release build
SOURCE_COMMIT="$(git rev-parse HEAD)" \
docker buildx bake --file docker/docker-bake.hcl alpine-armv6
```


## Local Multi Architecture container building

Start with the initialization; this only needs to be done once.

```bash
# Create and use a new buildx builder instance which connects to the host network
docker buildx create --name vaultwarden --use --driver-opt network=host

# Validate it runs
docker buildx inspect --bootstrap

# Create a local container registry directly reachable on the localhost
docker run -d --name registry --network host registry:2
```

After that is done, you should be able to build and push to the local registry.<br>
Use the following command with the modified variables to bake the Alpine images.<br>
Replace `alpine` with `debian` if you want to build the Debian multi-arch images.

```bash
# Start a buildx bake using a debug build
CARGO_PROFILE=dev \
SOURCE_COMMIT="$(git rev-parse HEAD)" \
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
docker buildx bake --file docker/docker-bake.hcl alpine-multi
```


## Using the `bake.sh` script

To make it a bit easier to trigger a build, there is also a `bake.sh` script.<br>
This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables.<br>
This script can be called from either the repo root or the docker directory.

So, if you want to build a multi-arch Alpine container pushing to your localhost registry, you can run this from within the docker directory (just make sure you executed the initialization steps above first):
```bash
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
./bake.sh alpine-multi
```

Or if you just want to build a Debian container from the repo root, you can run this:
```bash
docker/bake.sh
```

You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
This will also append those values to the tag so you can see the built container when running `docker images`.

You can also append extra arguments after the target if you want. This can be useful, for example, to print what bake will use:
```bash
docker/bake.sh alpine-all --print
```

### Testing baked images

To test these images you can run them by using the correct tag and providing the platform.<br>
For example, after you have built an arm64 image via `./bake.sh debian-arm64` you can run:
```bash
docker run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  vaultwarden/server:testing-arm64
```


## Using the `podman-bake.sh` script

To also make building easier using podman, there is a `podman-bake.sh` script.<br>
This script calls `podman buildx build` with the needed parameters and, like `bake.sh`, generates some variables automatically.<br>
This script can be called from either the repo root or the docker directory.

**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES` and a single `BASE_TAGS` value, no comma-separated values. It also only supports building separate architectures, no multi-arch containers.

To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
```bash
DB="sqlite,enable_mimalloc" \
./podman-bake.sh alpine-arm64
```

Or if you just want to build a Debian container from the repo root, you can run this:
```bash
docker/podman-bake.sh
```

You can append extra arguments after the target if you want. This can be useful, for example, to disable the cache like this:
```bash
./podman-bake.sh alpine-arm64 --no-cache
```

For the podman builds you can, just like with the `bake.sh` script, also append the architecture to build for that specific platform.<br>

### Testing podman-built images

The command to start a podman-built container is almost the same as for the docker/bake-built containers. The images start with `localhost/`, so you need to prepend that.

```bash
podman run --rm -it \
  -e DISABLE_ADMIN_TOKEN=true \
  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
  -p8080:80 --platform=linux/arm64 \
  localhost/vaultwarden/server:testing-arm64
```


## Variables supported
| Variable              | default            | description |
| --------------------- | ------------------ | ----------- |
| CARGO_PROFILE         | null               | Which cargo profile to use. `null` means what is defined in the Dockerfile |
| DB                    | null               | Which `features` to build. `null` means what is defined in the Dockerfile |
| SOURCE_REPOSITORY_URL | null               | The source repository from which this build was triggered |
| SOURCE_COMMIT         | null               | The commit hash of the current commit for this build |
| SOURCE_VERSION        | null               | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
| BASE_TAGS             | testing            | Tags to be used. Can be a comma-separated value like "latest,1.29.2" |
| CONTAINER_REGISTRIES  | vaultwarden/server | Comma-separated value of container registries. Like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
| VW_VERSION            | null               | Overrides the `SOURCE_VERSION` value. This is also used by the `build.rs` code, for example |
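
As a rough sketch of how these variables combine, here is a hypothetical release-style invocation; the tags and registry values below are placeholders, not required values:
```bash
# Bake Debian multi-arch images, tagged and pushed to two registries
BASE_TAGS="latest,1.29.2" \
CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
docker/bake.sh debian-multi
```
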
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs


# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,112 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,118 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs


# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/debian:bullseye-slim

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,112 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/library/alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata


VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the arm64 architecture
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        libmariadb3:arm64 \
        libpq-dev:arm64 \
        libpq5:arm64 \
        libssl-dev:arm64 \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
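Each of the per-architecture Dockerfiles deleted in this diff follows the same dummy-project caching pattern shown above; only the target triple, cross toolchain, and base images change. As a hedged illustration (the exact file paths under `docker/` are assumptions inferred from the deletions, not confirmed by this diff), a local build of one of these generated files would have looked roughly like:

    # Hypothetical local build of one of the generated per-arch Dockerfiles,
    # selecting only the sqlite backend via the DB build argument
    docker build \
        --file docker/arm64/Dockerfile \
        --build-arg DB=sqlite \
        --tag vaultwarden-arm64:local \
        .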
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,143 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,143 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armel architecture
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
        libc6-dev:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        libmariadb3:armel \
        libpq-dev:armel \
        libpq5:armel \
        libssl-dev:armel \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,116 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,139 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -1,114 +0,0 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
15 docker/bake.sh Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Be verbose on what is being executed
set -x

# Make sure we set the context to `..` so it will go up one directory
docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
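Since `docker buildx bake` reads unset HCL variables from the environment, this wrapper can be steered entirely via env vars. A minimal usage sketch (the target names come from the `docker-bake.hcl` added below; the local registry value is a hypothetical example, not something this diff defines):

    # Build the default Debian target for the host platform
    ./docker/bake.sh

    # Build a single-platform target with a custom tag and registry
    BASE_TAGS="dev" \
    CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
    ./docker/bake.sh debian-arm64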
33 docker/bake_env.sh Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# If SOURCE_COMMIT is provided via env skip this
if [ -z "${SOURCE_COMMIT+x}" ]; then
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# If VW_VERSION is provided via env use it as SOURCE_VERSION
# Else define it using git
if [[ -n "${VW_VERSION}" ]]; then
    SOURCE_VERSION="${VW_VERSION}"
else
    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
    if [[ -n "${GIT_EXACT_TAG}" ]]; then
        SOURCE_VERSION="${GIT_EXACT_TAG}"
    else
        GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
        SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
        GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
        case "${GIT_BRANCH}" in
            main|master|HEAD)
                # Do not add the branch name for these branches
                ;;
            *)
                SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
                ;;
        esac
    fi
fi

# Export the rendered variables above so bake will use them
export SOURCE_COMMIT
export SOURCE_VERSION
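To make the branching above concrete, here is how SOURCE_VERSION resolves in a few situations (the tag, hash, and branch values are hypothetical):

    # HEAD exactly on tag 1.29.2                  -> SOURCE_VERSION="1.29.2"
    # A few commits past 1.29.2 on main           -> SOURCE_VERSION="1.29.2-abc12345"
    # Same commits on a feature branch "fix-push" -> SOURCE_VERSION="1.29.2-abc12345 (fix-push)"
    # VW_VERSION=2.0.0 set in the environment     -> SOURCE_VERSION="2.0.0"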
229 docker/docker-bake.hcl Normal file
@@ -0,0 +1,229 @@
// ==== Baking Variables ====

// Set which cargo profile to use, dev or release for example
// Use the value provided in the Dockerfile as default
variable "CARGO_PROFILE" {
  default = null
}

// Set which DB's (features) to enable
// Use the value provided in the Dockerfile as default
variable "DB" {
  default = null
}

// The repository this build was triggered from
variable "SOURCE_REPOSITORY_URL" {
  default = null
}

// The commit hash of the current commit this build was triggered on
variable "SOURCE_COMMIT" {
  default = null
}

// The version of this build
// Typically the current exact tag of this commit,
// else the last tag and the first 8 characters of the source commit
variable "SOURCE_VERSION" {
  default = null
}

// This can be used to overwrite SOURCE_VERSION
// It will be used during the build.rs building stage
variable "VW_VERSION" {
  default = null
}

// The base tag(s) to use
// This can be a comma separated value like "testing,1.29.2"
variable "BASE_TAGS" {
  default = "testing"
}

// Which container registries should be used for the tagging
// This can be a comma separated value
// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
variable "CONTAINER_REGISTRIES" {
  default = "vaultwarden/server"
}


// ==== Baking Groups ====

group "default" {
  targets = ["debian"]
}


// ==== Shared Baking ====
function "labels" {
  params = []
  result = {
    "org.opencontainers.image.description" = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
    "org.opencontainers.image.licenses" = "AGPL-3.0-only"
    "org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
    "org.opencontainers.image.url" = "https://github.com/dani-garcia/vaultwarden"
    "org.opencontainers.image.created" = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
    "org.opencontainers.image.source" = "${SOURCE_REPOSITORY_URL}"
    "org.opencontainers.image.revision" = "${SOURCE_COMMIT}"
    "org.opencontainers.image.version" = "${SOURCE_VERSION}"
  }
}

target "_default_attributes" {
  labels = labels()
  args = {
    DB = "${DB}"
    CARGO_PROFILE = "${CARGO_PROFILE}"
    VW_VERSION = "${VW_VERSION}"
  }
}


// ==== Debian Baking ====

// Default Debian target, will build a container using the host's platform architecture
target "debian" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.debian"
  tags = generate_tags("", platform_tag())
  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "debian-multi" {
  inherits = ["debian"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "debian-amd64" {
  inherits = ["debian"]
  platforms = ["linux/amd64"]
  tags = generate_tags("", "-amd64")
}

target "debian-arm64" {
  inherits = ["debian"]
  platforms = ["linux/arm64"]
  tags = generate_tags("", "-arm64")
}

target "debian-armv7" {
  inherits = ["debian"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("", "-armv7")
}

target "debian-armv6" {
  inherits = ["debian"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("", "-armv6")
}

// A Group to build all platforms individually for local testing
group "debian-all" {
  targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
}


// ==== Alpine Baking ====

// Default Alpine target, will build a container using the host's platform architecture
target "alpine" {
  inherits = ["_default_attributes"]
  dockerfile = "docker/Dockerfile.alpine"
  tags = generate_tags("-alpine", platform_tag())
  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
}

// Multi Platform target, will build one tagged manifest with all supported architectures
// This is mainly used by GitHub Actions to build and push new containers
target "alpine-multi" {
  inherits = ["alpine"]
  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
  tags = generate_tags("-alpine", "")
  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
}

// Per platform targets, to individually test building per platform locally
target "alpine-amd64" {
  inherits = ["alpine"]
  platforms = ["linux/amd64"]
  tags = generate_tags("-alpine", "-amd64")
}

target "alpine-arm64" {
  inherits = ["alpine"]
  platforms = ["linux/arm64"]
  tags = generate_tags("-alpine", "-arm64")
}

target "alpine-armv7" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v7"]
  tags = generate_tags("-alpine", "-armv7")
}

target "alpine-armv6" {
  inherits = ["alpine"]
  platforms = ["linux/arm/v6"]
  tags = generate_tags("-alpine", "-armv6")
}

// A Group to build all platforms individually for local testing
group "alpine-all" {
  targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
}


// ==== Bake everything locally ====

group "all" {
  targets = ["debian-all", "alpine-all"]
}


// ==== Baking functions ====

// This will return the local platform as amd64, arm64 or armv7 for example
// It can be used for creating a local image tag
function "platform_tag" {
  params = []
  result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}"
}

function "get_container_registries" {
  params = []
  result = flatten(split(",", CONTAINER_REGISTRIES))
}

function "get_base_tags" {
  params = []
  result = flatten(split(",", BASE_TAGS))
}

function "generate_tags" {
  params = [
    suffix, // What to append to the BASE_TAG when needed, like `-alpine` for example
    platform // the platform we are building for if needed
  ]
  result = flatten([
    for registry in get_container_registries() :
      [for base_tag in get_base_tags() :
        concat(["${registry}:${base_tag}${suffix}${platform}"])]
  ])
}

function "image_index_annotations" {
  params = []
  result = flatten([
    for key, value in labels() :
      value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : []
  ])
}
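Putting the variables and targets together, a release-style multi-arch build could be driven like this (registries, tags and the feature list are illustrative; `docker buildx bake` resolves the HCL variables from the environment):

    CONTAINER_REGISTRIES="ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server" \
    BASE_TAGS="testing,1.30.0" \
    DB="sqlite,mysql,postgresql" \
    ./docker/bake.sh debian-multi

With two registries and two base tags, generate_tags() expands to four tags per target, e.g. `ghcr.io/dani-garcia/vaultwarden:testing` and `docker.io/vaultwarden/server:1.30.0`.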
@@ -10,7 +10,7 @@ CONFIG_FILE="${DATA_FOLDER}"/config.json

 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {
-    local key="$1"
+    key="$1"
     # Extract a line of the form:
     #     "domain": "https://bw.example.com/path",
     grep "\"${key}\":" "${CONFIG_FILE}" |
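To illustrate what get_config_val() extracts (the domain value is made up, taken from the comment above): given a config.json containing the line `"domain": "https://bw.example.com/path",`, calling

    get_config_val domain    # prints: https://bw.example.com/path

returns the bare value, which the rest of the healthcheck can then use to build its probe URL.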
105  docker/podman-bake.sh  Executable file
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's
source "${BASEDIR}/bake_env.sh"

# Check if a target is given as first argument
# If not we assume the defaults and pass the given arguments to the podman command
case "${1}" in
    alpine*|debian*)
        TARGET="${1}"
        # Now shift the $@ array so we only have the rest of the arguments
        # This allows us to append these as extra arguments to the podman buildx build command
        shift
        ;;
esac

LABEL_ARGS=(
    --label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
    --label org.opencontainers.image.licenses="AGPL-3.0-only"
    --label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    --label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    --label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
)
if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
fi
if [[ -n "${SOURCE_COMMIT}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
fi
if [[ -n "${SOURCE_VERSION}" ]]; then
    LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
fi

# Check if and which --build-arg arguments we need to configure
BUILD_ARGS=()
if [[ -n "${DB}" ]]; then
    BUILD_ARGS+=(--build-arg DB="${DB}")
fi
if [[ -n "${CARGO_PROFILE}" ]]; then
    BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
fi
if [[ -n "${VW_VERSION}" ]]; then
    BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
fi

# Set the default BASE_TAGS if none are provided
if [[ -z "${BASE_TAGS}" ]]; then
    BASE_TAGS="testing"
fi

# Set the default CONTAINER_REGISTRIES if none are provided
if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
    CONTAINER_REGISTRIES="vaultwarden/server"
fi

# Check which Dockerfile we need to use, default is debian
case "${TARGET}" in
    alpine*)
        BASE_TAGS="${BASE_TAGS}-alpine"
        DOCKERFILE="Dockerfile.alpine"
        ;;
    *)
        DOCKERFILE="Dockerfile.debian"
        ;;
esac

# Check which platform we need to build and append the BASE_TAGS with the architecture
case "${TARGET}" in
    *-arm64)
        BASE_TAGS="${BASE_TAGS}-arm64"
        PLATFORM="linux/arm64"
        ;;
    *-armv7)
        BASE_TAGS="${BASE_TAGS}-armv7"
        PLATFORM="linux/arm/v7"
        ;;
    *-armv6)
        BASE_TAGS="${BASE_TAGS}-armv6"
        PLATFORM="linux/arm/v6"
        ;;
    *)
        BASE_TAGS="${BASE_TAGS}-amd64"
        PLATFORM="linux/amd64"
        ;;
esac

# Be verbose on what is being executed
set -x

# Build the image with podman
# We use the docker format here since we are using `SHELL`, which is not supported by OCI
# shellcheck disable=SC2086
podman buildx build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
    --format=docker \
    "${LABEL_ARGS[@]}" \
    "${BUILD_ARGS[@]}" \
    --file="${BASEDIR}/${DOCKERFILE}" "$@" \
    "${BASEDIR}/.."
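Usage mirrors the buildx script: an optional first argument picks a target, and everything else is appended to the `podman buildx build` call (values illustrative):

    # Alpine image for arm64, SQLite-only feature set
    DB="sqlite" ./docker/podman-bake.sh alpine-arm64

    # No target given: Debian image for linux/amd64
    ./docker/podman-bake.sh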
@@ -1,17 +1,31 @@
 #!/usr/bin/env python3

-import os, argparse, json
+import os
+import argparse
+import json
+import yaml
 import jinja2

+# Load settings file
+with open("DockerSettings.yaml", 'r') as yaml_file:
+    yaml_data = yaml.safe_load(yaml_file)
+
+settings_env = jinja2.Environment(
+    loader=jinja2.FileSystemLoader(os.getcwd()),
+)
+settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data))
+
 args_parser = argparse.ArgumentParser()
 args_parser.add_argument('template_file', help='Jinja2 template file to render.')
 args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
 cli_args = args_parser.parse_args()

+# Merge the default config yaml with the json arguments given.
 render_vars = json.loads(cli_args.render_vars)
+settings_yaml.update(render_vars)

 environment = jinja2.Environment(
     loader=jinja2.FileSystemLoader(os.getcwd()),
     trim_blocks=True,
 )
-print(environment.get_template(cli_args.template_file).render(render_vars))
+print(environment.get_template(cli_args.template_file).render(settings_yaml))
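A render call might look like the following (assuming the script keeps its repo name, render_template, and is invoked from the docker directory; the JSON keys are whatever the template expects, so the value shown is illustrative only):

    python3 render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian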
@@ -1,20 +0,0 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)
export arches

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    distro_suffix=.alpine
fi
export distro_suffix
51  hooks/build
@@ -1,51 +0,0 @@
#!/usr/bin/env bash

echo ">>> Building images..."

# shellcheck source=arches.sh
source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="AGPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
    buildkit_suffix=.buildkit
fi

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
        .
done
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
111  hooks/push
@@ -1,111 +0,0 @@
#!/usr/bin/env bash

# shellcheck source=arches.sh
source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }

set -ex

echo ">>> Starting local Docker registry when needed..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
# First check if there already is a registry container running, else skip it.
# This will only happen either locally or running it via Github Actions
#
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
    # defaults to port 5000
    docker run -d --name registry --network host registry:2
fi

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in "${arches[@]}"; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
# Check if there already is a builder running, else skip this and use the existing.
# This will only happen either locally or running it via Github Actions
#
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
    docker buildx create --name builder --use --driver-opt network=host
fi

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=("${DOCKER_REPO}:alpine")
    else
        tags+=("${DOCKER_REPO}:latest")
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in "${arches[@]}"; do
    platforms+=("${arch_to_platform[$arch]}")
done
platform="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platform}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                        CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                   CHAR(36) NOT NULL,
    organization_uuid           CHAR(36),
    request_device_identifier   CHAR(36) NOT NULL,
    device_type                 INTEGER NOT NULL,
    request_ip                  TEXT NOT NULL,
    response_device_id          CHAR(36),
    access_code                 TEXT NOT NULL,
    public_key                  TEXT NOT NULL,
    enc_key                     TEXT NOT NULL,
    master_password_hash        TEXT NOT NULL,
    approved                    BOOLEAN,
    creation_date               DATETIME NOT NULL,
    response_date               DATETIME,
    authentication_date         DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
MODIFY master_password_hash TEXT;

ALTER TABLE auth_requests
MODIFY enc_key TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
2  migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql  Normal file
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN `key` TEXT;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                        CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid                   CHAR(36) NOT NULL,
    organization_uuid           CHAR(36),
    request_device_identifier   CHAR(36) NOT NULL,
    device_type                 INTEGER NOT NULL,
    request_ip                  TEXT NOT NULL,
    response_device_id          CHAR(36),
    access_code                 TEXT NOT NULL,
    public_key                  TEXT NOT NULL,
    enc_key                     TEXT NOT NULL,
    master_password_hash        TEXT NOT NULL,
    approved                    BOOLEAN,
    creation_date               TIMESTAMP NOT NULL,
    response_date               TIMESTAMP,
    authentication_date         TIMESTAMP,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,5 @@
ALTER TABLE auth_requests
ALTER COLUMN master_password_hash DROP NOT NULL;

ALTER TABLE auth_requests
ALTER COLUMN enc_key DROP NOT NULL;
@@ -0,0 +1,2 @@
ALTER TABLE users_organizations
ADD COLUMN external_id TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -0,0 +1,19 @@
CREATE TABLE auth_requests (
    uuid                        TEXT NOT NULL PRIMARY KEY,
    user_uuid                   TEXT NOT NULL,
    organization_uuid           TEXT,
    request_device_identifier   TEXT NOT NULL,
    device_type                 INTEGER NOT NULL,
    request_ip                  TEXT NOT NULL,
    response_device_id          TEXT,
    access_code                 TEXT NOT NULL,
    public_key                  TEXT NOT NULL,
    enc_key                     TEXT NOT NULL,
    master_password_hash        TEXT NOT NULL,
    approved                    BOOLEAN,
    creation_date               DATETIME NOT NULL,
    response_date               DATETIME,
    authentication_date         DATETIME,
    FOREIGN KEY(user_uuid) REFERENCES users(uuid),
    FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
);
@@ -0,0 +1,29 @@
-- Create new auth_requests table with master_password_hash as nullable column
CREATE TABLE auth_requests_new (
    uuid                        TEXT NOT NULL PRIMARY KEY,
    user_uuid                   TEXT NOT NULL,
    organization_uuid           TEXT,
    request_device_identifier   TEXT NOT NULL,
    device_type                 INTEGER NOT NULL,
    request_ip                  TEXT NOT NULL,
    response_device_id          TEXT,
    access_code                 TEXT NOT NULL,
    public_key                  TEXT NOT NULL,
    enc_key                     TEXT,
    master_password_hash        TEXT,
    approved                    BOOLEAN,
    creation_date               DATETIME NOT NULL,
    response_date               DATETIME,
    authentication_date         DATETIME,
    FOREIGN KEY (user_uuid) REFERENCES users (uuid),
    FOREIGN KEY (organization_uuid) REFERENCES organizations (uuid)
);

-- Transfer current data to new table
INSERT INTO auth_requests_new SELECT * FROM auth_requests;

-- Drop the old table
DROP TABLE auth_requests;

-- Rename the new table to the original name
ALTER TABLE auth_requests_new RENAME TO auth_requests;
@@ -0,0 +1,2 @@
-- Add the external_id to the users_organizations table
ALTER TABLE "users_organizations" ADD COLUMN "external_id" TEXT;
@@ -0,0 +1,2 @@
ALTER TABLE ciphers
ADD COLUMN "key" TEXT;
@@ -1 +0,0 @@
1.70.0
4  rust-toolchain.toml  Normal file
@@ -0,0 +1,4 @@
[toolchain]
channel = "1.73.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
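With the TOML variant in place, rustup resolves the pinned toolchain (plus rustfmt and clippy) automatically when invoked inside the repository:

    rustup show active-toolchain    # -> 1.73.0-<host-triple> (overridden by rust-toolchain.toml)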
@@ -279,12 +279,11 @@ async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
 #[post("/invite", data = "<data>")]
 async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let data: InviteData = data.into_inner();
-    let email = data.email.clone();
     if User::find_by_mail(&data.email, &mut conn).await.is_some() {
         err_code!("User already exists", Status::Conflict.code)
     }

-    let mut user = User::new(email);
+    let mut user = User::new(data.email);

     async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
         if CONFIG.mail_enabled() {
@@ -326,6 +325,10 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
         let mut usr = u.to_json(&mut conn).await;
         usr["UserEnabled"] = json!(u.enabled);
         usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["LastActive"] = match u.last_active(&mut conn).await {
+            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
+            None => json!(None::<String>),
+        };
         users_json.push(usr);
     }
@@ -1,13 +1,14 @@
+use crate::db::DbPool;
 use chrono::Utc;
 use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
     api::{
-        core::log_user_event, register_push_device, unregister_push_device, EmptyResult, JsonResult, JsonUpcase,
-        Notify, NumberOrString, PasswordData, UpdateType,
+        core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
+        JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
     },
-    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
+    auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
     crypto,
     db::{models::*, DbConn},
     mail, CONFIG,
@@ -51,6 +52,11 @@ pub fn routes() -> Vec<rocket::Route> {
         put_device_token,
         put_clear_device_token,
         post_clear_device_token,
+        post_auth_request,
+        get_auth_request,
+        put_auth_request,
+        get_auth_request_response,
+        get_auth_requests,
     ]
 }
@@ -340,7 +346,7 @@ async fn post_password(

     let save_result = user.save(&mut conn).await;

-    // Prevent loging out the client where the user requested this endpoint from.
+    // Prevent logging out the client where the user requested this endpoint from.
     // If you do logout the user it will causes issues at the client side.
     // Adding the device uuid will prevent this.
     nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -487,7 +493,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D

     let save_result = user.save(&mut conn).await;

-    // Prevent loging out the client where the user requested this endpoint from.
+    // Prevent logging out the client where the user requested this endpoint from.
     // If you do logout the user it will causes issues at the client side.
     // Adding the device uuid will prevent this.
     nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -527,6 +533,10 @@ struct EmailTokenData {

 #[post("/accounts/email-token", data = "<data>")]
 async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: EmailTokenData = data.into_inner().data;
     let mut user = headers.user;

@@ -573,6 +583,10 @@ async fn post_email(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> EmptyResult {
+    if !CONFIG.email_change_allowed() {
+        err!("Email change is not allowed.");
+    }
+
     let data: ChangeEmailData = data.into_inner().data;
     let mut user = headers.user;
@@ -964,10 +978,10 @@ async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Head
         device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
     }
     if let Err(e) = device.save(&mut conn).await {
-        err!(format!("An error occured while trying to save the device push token: {e}"));
+        err!(format!("An error occurred while trying to save the device push token: {e}"));
     }
     if let Err(e) = register_push_device(headers.user.uuid, device).await {
-        err!(format!("An error occured while proceeding registration of a device: {e}"));
+        err!(format!("An error occurred while proceeding registration of a device: {e}"));
     }

     Ok(())
@@ -996,3 +1010,211 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
 async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
     put_clear_device_token(uuid, conn).await
 }
+
+#[derive(Debug, Deserialize)]
+#[allow(non_snake_case)]
+struct AuthRequestRequest {
+    accessCode: String,
+    deviceIdentifier: String,
+    email: String,
+    publicKey: String,
+    #[serde(alias = "type")]
+    _type: i32,
+}
+
+#[post("/auth-requests", data = "<data>")]
+async fn post_auth_request(
+    data: Json<AuthRequestRequest>,
+    headers: ClientHeaders,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    let data = data.into_inner();
+
+    let user = match User::find_by_mail(&data.email, &mut conn).await {
+        Some(user) => user,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    let mut auth_request = AuthRequest::new(
+        user.uuid.clone(),
+        data.deviceIdentifier.clone(),
+        headers.device_type,
+        headers.ip.ip.to_string(),
+        data.accessCode,
+        data.publicKey,
+    );
+    auth_request.save(&mut conn).await?;
+
+    nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.deviceIdentifier, &mut conn).await;
+
+    Ok(Json(json!({
+        "id": auth_request.uuid,
+        "publicKey": auth_request.public_key,
+        "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+        "requestIpAddress": auth_request.request_ip,
+        "key": null,
+        "masterPasswordHash": null,
+        "creationDate": auth_request.creation_date.and_utc(),
+        "responseDate": null,
+        "requestApproved": false,
+        "origin": CONFIG.domain_origin(),
+        "object": "auth-request"
+    })))
+}
+
+#[get("/auth-requests/<uuid>")]
+async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
+    let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[derive(Debug, Deserialize)]
+#[allow(non_snake_case)]
+struct AuthResponseRequest {
+    deviceIdentifier: String,
+    key: String,
+    masterPasswordHash: Option<String>,
+    requestApproved: bool,
+}
+
+#[put("/auth-requests/<uuid>", data = "<data>")]
+async fn put_auth_request(
+    uuid: &str,
+    data: Json<AuthResponseRequest>,
+    mut conn: DbConn,
+    ant: AnonymousNotify<'_>,
+    nt: Notify<'_>,
+) -> JsonResult {
+    let data = data.into_inner();
+    let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    auth_request.approved = Some(data.requestApproved);
+    auth_request.enc_key = Some(data.key);
+    auth_request.master_password_hash = data.masterPasswordHash;
+    auth_request.response_device_id = Some(data.deviceIdentifier.clone());
+    auth_request.save(&mut conn).await?;
+
+    if auth_request.approved.unwrap_or(false) {
+        ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
+        nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.deviceIdentifier, &mut conn).await;
+    }
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[get("/auth-requests/<uuid>/response?<code>")]
+async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) -> JsonResult {
+    let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+        Some(auth_request) => auth_request,
+        None => {
+            err!("AuthRequest doesn't exist")
+        }
+    };
+
+    if !auth_request.check_access_code(code) {
+        err!("Access code invalid doesn't exist")
+    }
+
+    let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+
+    Ok(Json(json!(
+        {
+            "id": uuid,
+            "publicKey": auth_request.public_key,
+            "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
+            "requestIpAddress": auth_request.request_ip,
+            "key": auth_request.enc_key,
+            "masterPasswordHash": auth_request.master_password_hash,
+            "creationDate": auth_request.creation_date.and_utc(),
+            "responseDate": response_date_utc,
+            "requestApproved": auth_request.approved,
+            "origin": CONFIG.domain_origin(),
+            "object":"auth-request"
+        }
+    )))
+}
+
+#[get("/auth-requests")]
+async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
+    let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &mut conn).await;
+
+    Ok(Json(json!({
+        "data": auth_requests
+            .iter()
+            .filter(|request| request.approved.is_none())
+            .map(|request| {
+                let response_date_utc = request.response_date.map(|response_date| response_date.and_utc());
+
+                json!({
+                    "id": request.uuid,
+                    "publicKey": request.public_key,
+                    "requestDeviceType": DeviceType::from_i32(request.device_type).to_string(),
+                    "requestIpAddress": request.request_ip,
+                    "key": request.enc_key,
+                    "masterPasswordHash": request.master_password_hash,
+                    "creationDate": request.creation_date.and_utc(),
+                    "responseDate": response_date_utc,
+                    "requestApproved": request.approved,
+                    "origin": CONFIG.domain_origin(),
+                    "object":"auth-request"
+                })
+            }).collect::<Vec<Value>>(),
+        "continuationToken": null,
+        "object": "list"
+    })))
+}
+
+pub async fn purge_auth_requests(pool: DbPool) {
+    debug!("Purging auth requests");
+    if let Ok(mut conn) = pool.get().await {
+        AuthRequest::purge_expired_auth_requests(&mut conn).await;
+    } else {
+        error!("Failed to get DB connection while purging trashed ciphers")
+    }
+}
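A rough sketch of the login-with-device round trip these endpoints implement, assuming the default /api mount (all UUIDs, keys and codes below are placeholders; real clients also send proper device headers and encrypted payloads):

    # 1. Requesting device creates the auth request
    curl -X POST "$DOMAIN/api/auth-requests" -H "Content-Type: application/json" \
         -d '{"accessCode":"<code>","deviceIdentifier":"<uuid>","email":"user@example.com","publicKey":"<b64>","type":0}'

    # 2. Approving device answers it
    curl -X PUT "$DOMAIN/api/auth-requests/<request-uuid>" -H "Content-Type: application/json" \
         -d '{"deviceIdentifier":"<uuid>","key":"<encrypted-key>","masterPasswordHash":null,"requestApproved":true}'

    # 3. Requesting device fetches the response using its access code
    curl "$DOMAIN/api/auth-requests/<request-uuid>/response?code=<code>"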
@@ -206,12 +206,13 @@ pub struct CipherData {
     // TODO: Some of these might appear all the time, no need for Option
     OrganizationId: Option<String>,

+    Key: Option<String>,
+
     /*
     Login = 1,
     SecureNote = 2,
     Card = 3,
-    Identity = 4,
-    Fido2Key = 5
+    Identity = 4
     */
     pub Type: i32,
     pub Name: String,
@@ -223,7 +224,6 @@ pub struct CipherData {
     SecureNote: Option<Value>,
     Card: Option<Value>,
     Identity: Option<Value>,
-    Fido2Key: Option<Value>,

     Favorite: Option<bool>,
     Reprompt: Option<i32>,
@@ -359,14 +359,17 @@ pub async fn update_cipher_from_data(
     enforce_personal_ownership_policy(Some(&data), headers, conn).await?;

     // Check that the client isn't updating an existing cipher with stale data.
-    if let Some(dt) = data.LastKnownRevisionDate {
-        match NaiveDateTime::parse_from_str(&dt, "%+") {
-            // ISO 8601 format
-            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
-            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
-                err!("The client copy of this cipher is out of date. Resync the client and try again.")
+    // And only perform this check when not importing ciphers, else the date/time check will fail.
+    if ut != UpdateType::None {
+        if let Some(dt) = data.LastKnownRevisionDate {
+            match NaiveDateTime::parse_from_str(&dt, "%+") {
+                // ISO 8601 format
+                Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+                Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
+                    err!("The client copy of this cipher is out of date. Resync the client and try again.")
+                }
+                Ok(_) => (),
             }
-            Ok(_) => (),
         }
     }

@@ -466,7 +469,6 @@ pub async fn update_cipher_from_data(
         2 => data.SecureNote,
         3 => data.Card,
         4 => data.Identity,
-        5 => data.Fido2Key,
         _ => err!("Invalid type"),
     };

@@ -483,6 +485,7 @@ pub async fn update_cipher_from_data(
         None => err!("Data missing"),
     };

+    cipher.key = data.Key;
     cipher.name = data.Name;
     cipher.notes = data.Notes;
     cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
@@ -1752,7 +1755,7 @@ impl CipherSyncData {
     let cipher_folders: HashMap<String, String>;
     let cipher_favorites: HashSet<String>;
     match sync_type {
-        // User Sync supports Folders and Favorits
+        // User Sync supports Folders and Favorites
         CipherSyncType::User => {
             // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
             cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect();
@@ -1760,7 +1763,7 @@ impl CipherSyncData {
             // Generate a HashSet of all the Cipher UUID's which are marked as favorite
             cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect();
         }
-        // Organization Sync does not support Folders and Favorits.
+        // Organization Sync does not support Folders and Favorites.
         // If these are set, it will cause issues in the web-vault.
         CipherSyncType::Organization => {
             cipher_folders = HashMap::with_capacity(0);
@@ -1805,7 +1808,7 @@ impl CipherSyncData {
             .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
             .collect();

-        // Get all organizations that the user has full access to via group assignement
+        // Get all organizations that the user has full access to via group assignment
         let user_group_full_access_for_organizations: HashSet<String> =
             Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect();
@@ -319,7 +319,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
     let claims = decode_emergency_access_invite(token)?;

     // This can happen if the user who received the invite used a different email to signup.
-    // Since we do not know if this is intented, we error out here and do nothing with the invite.
+    // Since we do not know if this is intended, we error out here and do nothing with the invite.
     if claims.email != headers.user.email {
         err!("Claim email does not match current users email")
     }
@@ -8,6 +8,7 @@ mod public;
 mod sends;
 pub mod two_factor;

+pub use accounts::purge_auth_requests;
 pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
@@ -193,7 +194,12 @@ fn version() -> Json<&'static str> {
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
     Json(json!({
-        "version": crate::VERSION,
+        // Note: The clients use this version to handle backwards compatibility concerns
+        // This means they expect a version that closely matches the Bitwarden server version
+        // We should make sure that we keep this updated when we support the new server features
+        // Version history:
+        // - Individual cipher key encryption: 2023.9.1
+        "version": "2023.9.1",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
@@ -206,6 +212,13 @@ fn config() -> Json<Value> {
             "notifications": format!("{domain}/notifications"),
             "sso": "",
         },
+        "featureStates": {
+            // Any feature flags that we want the clients to use
+            // Can check the enabled ones at:
+            // https://vault.bitwarden.com/api/config
+            "autofill-v2": true,
+            "fido2-vault-credentials": true
+        },
         "object": "config",
     }))
 }
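The new values are easy to verify against a running instance (jq is used only for readability; the domain is a placeholder):

    curl -s "$DOMAIN/api/config" | jq '{version, featureStates}'
    # -> reports "2023.9.1" plus the feature flags above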
@@ -6,8 +6,7 @@ use serde_json::Value;
 use crate::{
     api::{
         core::{log_event, CipherSyncData, CipherSyncType},
-        ApiResult, EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData,
-        UpdateType,
+        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData, UpdateType,
     },
     auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
     db::{models::*, DbConn},
@@ -61,6 +60,7 @@ pub fn routes() -> Vec<Route> {
         put_policy,
         get_organization_tax,
         get_plans,
+        get_plans_all,
         get_plans_tax_rates,
         import,
         post_org_keys,
@@ -468,7 +468,11 @@ async fn post_organization_collection_update(
     }

     collection.name = data.Name;
-    collection.external_id = data.ExternalId;
+    collection.external_id = match data.ExternalId {
+        Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
+        _ => None,
+    };

     collection.save(&mut conn).await?;

     log_event(
@@ -1516,9 +1520,9 @@ async fn bulk_public_keys(
     let data: OrgBulkIds = data.into_inner().data;

     let mut bulk_response = Vec::new();
-    // Check all received UserOrg UUID's and find the matching User to retreive the public-key.
+    // Check all received UserOrg UUID's and find the matching User to retrieve the public-key.
     // If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID.
-    // The web-vault will then ignore that user for the folowing steps.
+    // The web-vault will then ignore that user for the following steps.
     for user_org_id in data.Ids {
         match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await {
             Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await {
@@ -1807,12 +1811,28 @@ fn get_plans() -> Json<Value> {
             "Product": 0,
             "Name": "Free",
             "NameLocalizationKey": "planNameFree",
+            "BitwardenProduct": 0,
+            "MaxUsers": 0,
             "DescriptionLocalizationKey": "planDescFree"
+        },{
+            "Object": "plan",
+            "Type": 0,
+            "Product": 1,
+            "Name": "Free",
+            "NameLocalizationKey": "planNameFree",
+            "BitwardenProduct": 1,
+            "MaxUsers": 0,
+            "DescriptionLocalizationKey": "planDescFree"
         }],
         "ContinuationToken": null
     }))
 }

+#[get("/plans/all")]
+fn get_plans_all() -> Json<Value> {
+    get_plans()
+}
+
 #[get("/plans/sales-tax-rates")]
 fn get_plans_tax_rates(_headers: Headers) -> Json<Value> {
     // Prevent a 404 error, which also causes Javascript errors.
@@ -1862,7 +1882,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
     // This means that this endpoint can end up removing users that were added manually by an admin,
     // as opposed to upstream which only removes auto-imported users.

-    // User needs to be admin or owner to use the Directry Connector
+    // User needs to be admin or owner to use the Directory Connector
     match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
         Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
         Some(_) => err!("User has insufficient permissions to use Directory Connector"),
@@ -2222,29 +2242,22 @@ struct GroupRequest {
 }

 impl GroupRequest {
-    pub fn to_group(&self, organizations_uuid: &str) -> ApiResult<Group> {
-        match self.AccessAll {
-            Some(access_all_value) => Ok(Group::new(
-                organizations_uuid.to_owned(),
-                self.Name.clone(),
-                access_all_value,
-                self.ExternalId.clone(),
-            )),
-            _ => err!("Could not convert GroupRequest to Group, because AccessAll has no value!"),
-        }
+    pub fn to_group(&self, organizations_uuid: &str) -> Group {
+        Group::new(
+            String::from(organizations_uuid),
+            self.Name.clone(),
+            self.AccessAll.unwrap_or(false),
+            self.ExternalId.clone(),
+        )
     }

-    pub fn update_group(&self, mut group: Group) -> ApiResult<Group> {
-        match self.AccessAll {
-            Some(access_all_value) => {
-                group.name = self.Name.clone();
-                group.access_all = access_all_value;
-                group.set_external_id(self.ExternalId.clone());
-
-                Ok(group)
-            }
-            _ => err!("Could not update group, because AccessAll has no value!"),
-        }
+    pub fn update_group(&self, mut group: Group) -> Group {
+        group.name = self.Name.clone();
+        group.access_all = self.AccessAll.unwrap_or(false);
+        // Group Updates do not support changing the external_id
+        // These input fields are in a disabled state, and can only be updated/added via ldap_import
+
+        group
     }
 }

@@ -2305,7 +2318,7 @@ async fn post_groups(
     }

     let group_request = data.into_inner().data;
-    let group = group_request.to_group(org_id)?;
+    let group = group_request.to_group(org_id);

     log_event(
         EventType::GroupCreated as i32,
@@ -2339,7 +2352,7 @@ async fn put_group(
     };

     let group_request = data.into_inner().data;
-    let updated_group = group_request.update_group(group)?;
+    let updated_group = group_request.update_group(group);

     CollectionGroup::delete_all_by_group(group_id, &mut conn).await?;
     GroupUser::delete_all_by_group(group_id, &mut conn).await?;
@@ -2884,7 +2897,7 @@ async fn put_reset_password_enrollment(

     // This is a new function active since the v2022.9.x clients.
     // It combines the previous two calls done before.
-    // We call those two functions here and combine them our selfs.
+    // We call those two functions here and combine them ourselves.
     //
     // NOTE: It seems clients can't handle uppercase-first keys!!
     // We need to convert all keys so they have the first character to be a lowercase.
@@ -56,16 +56,34 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                 if let Some(mut user_org) =
                     UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
                 {
-                    user_org.revoke();
-                    user_org.save(&mut conn).await?;
+                    // Only revoke a user if it is not the last confirmed owner
+                    let revoked = if user_org.atype == UserOrgType::Owner
+                        && user_org.status == UserOrgStatus::Confirmed as i32
+                    {
+                        if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await
+                            <= 1
+                        {
+                            warn!("Can't revoke the last owner");
+                            false
+                        } else {
+                            user_org.revoke()
+                        }
+                    } else {
+                        user_org.revoke()
+                    };
+
+                    let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                    if revoked || ext_modified {
+                        user_org.save(&mut conn).await?;
+                    }
                 }
             // If user is part of the organization, restore it
             } else if let Some(mut user_org) =
                 UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
             {
                 if user_org.status < UserOrgStatus::Revoked as i32 {
-                    user_org.restore();
+                    let restored = user_org.restore();
+                    let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                    if restored || ext_modified {
+                        user_org.save(&mut conn).await?;
+                    }
                 } else {
@@ -73,9 +91,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
-                    // doesn't exist in vaultwarden
+                    // User does not exist yet
                     let mut new_user = User::new(user_data.Email.clone());
-                    new_user.set_external_id(Some(user_data.ExternalId.clone()));
                     new_user.save(&mut conn).await?;

                     if !CONFIG.mail_enabled() {
@@ -85,13 +102,14 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                     new_user
                 }
             };
-            let user_org_status = if CONFIG.mail_enabled() {
+            let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
                 UserOrgStatus::Invited as i32
             } else {
                 UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
             };

             let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
+            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
             new_org_user.access_all = false;
             new_org_user.atype = UserOrgType::User as i32;
             new_org_user.status = user_org_status;
@@ -132,12 +150,10 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

             for ext_id in &group_data.MemberExternalIds {
-                if let Some(user) = User::find_by_external_id(ext_id, &mut conn).await {
-                    if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await
-                    {
-                        let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
-                        group_user.save(&mut conn).await?;
-                    }
+                if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
+                {
+                    let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
+                    group_user.save(&mut conn).await?;
                 }
             }
         }
@@ -150,10 +166,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
         // Generate a HashSet to quickly verify if a member is listed or not.
         let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
         for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
-            if let Some(user_external_id) =
-                User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.external_id)
-            {
-                if user_external_id.is_some() && !sync_members.contains(&user_external_id.unwrap()) {
+            if let Some(ref user_external_id) = user_org.external_id {
+                if !sync_members.contains(user_external_id) {
                     if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
                         // Removing owner, check that there is at least one other confirmed owner
                         if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
@@ -340,9 +340,13 @@ async fn post_send_file_v2_data(

     let mut data = data.into_inner();

-    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { err!("Send not found. Unable to save the file.") };
+    let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else {
+        err!("Send not found. Unable to save the file.")
+    };

-    let Some(send_user_id) = &send.user_uuid else {err!("Sends are only supported for users at the moment")};
+    let Some(send_user_id) = &send.user_uuid else {
+        err!("Sends are only supported for users at the moment")
+    };
     if send_user_id != &headers.user.uuid {
         err!("Send doesn't belong to user");
     }
@@ -177,7 +177,7 @@ pub async fn validate_totp_code(
         }
     }

-    // Else no valide code received, deny access
+    // Else no valid code received, deny access
     err!(
         format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
         ErrorEvent {
252  src/api/icons.rs
@@ -19,7 +19,7 @@ use tokio::{
    net::lookup_host,
};

use html5gum::{Emitter, EndTag, HtmlString, InfallibleTokenizer, Readable, StartTag, StringReader, Tokenizer};
use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};

use crate::{
    error::Error,
@@ -46,10 +46,15 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
    // Generate the cookie store
    let cookie_store = Arc::new(Jar::default());

    let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
    let pool_idle_timeout = Duration::from_secs(10);
    // Reuse the client between requests
    let client = get_reqwest_client_builder()
        .cookie_provider(Arc::clone(&cookie_store))
        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
        .timeout(icon_download_timeout)
        .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
        .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
        .trust_dns(true)
        .default_headers(default_headers.clone());

    match client.build() {
@@ -58,9 +63,11 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
            get_reqwest_client_builder()
                .cookie_provider(cookie_store)
                .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
                .default_headers(default_headers)
                .timeout(icon_download_timeout)
                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
                .trust_dns(false)
                .default_headers(default_headers)
                .build()
                .expect("Failed to build client")
        }
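The change above hoists the shared timeout values into local bindings so the primary (trust-dns) and fallback builders cannot drift apart. A minimal, self-contained sketch of the same build-with-fallback pattern using plain reqwest — the timeout values and log message here are illustrative, not taken from this diff:

use std::time::Duration;

// Try a primary client configuration; if it fails to build, fall back
// to a simpler one. Shared settings live in local bindings so the two
// builders stay in sync.
fn build_client() -> reqwest::Client {
    let download_timeout = Duration::from_secs(10);
    let pool_idle_timeout = Duration::from_secs(10);

    match reqwest::Client::builder()
        .timeout(download_timeout)
        .pool_max_idle_per_host(5) // keep at most 5 idle connections per host
        .pool_idle_timeout(pool_idle_timeout)
        .build()
    {
        Ok(client) => client,
        Err(e) => {
            eprintln!("primary client failed to build ({e}), using fallback");
            reqwest::Client::builder()
                .timeout(download_timeout)
                .build()
                .expect("Failed to build fallback client")
        }
    }
}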
@@ -258,7 +265,7 @@ mod tests {
    }
}

#[derive(Debug, Clone)]
#[derive(Clone)]
enum DomainBlacklistReason {
    Regex,
    IP,
@@ -415,38 +422,34 @@ fn get_favicons_node(
    const TAG_LINK: &[u8] = b"link";
    const TAG_BASE: &[u8] = b"base";
    const TAG_HEAD: &[u8] = b"head";
    const ATTR_REL: &[u8] = b"rel";
    const ATTR_HREF: &[u8] = b"href";
    const ATTR_SIZES: &[u8] = b"sizes";

    let mut base_url = url.clone();
    let mut icon_tags: Vec<StartTag> = Vec::new();
    let mut icon_tags: Vec<Tag> = Vec::new();
    for token in dom {
        match token {
            FaviconToken::StartTag(tag) => {
                if *tag.name == TAG_LINK
                    && tag.attributes.contains_key(ATTR_REL)
                    && tag.attributes.contains_key(ATTR_HREF)
                {
                    let rel_value = std::str::from_utf8(tag.attributes.get(ATTR_REL).unwrap())
                        .unwrap_or_default()
                        .to_ascii_lowercase();
                    if rel_value.contains("icon") && !rel_value.contains("mask-icon") {
                        icon_tags.push(tag);
                    }
                } else if *tag.name == TAG_BASE && tag.attributes.contains_key(ATTR_HREF) {
                    let href = std::str::from_utf8(tag.attributes.get(ATTR_HREF).unwrap()).unwrap_or_default();
                    debug!("Found base href: {href}");
                    base_url = match base_url.join(href) {
                        Ok(inner_url) => inner_url,
                        _ => url.clone(),
                    };
                }
        let tag_name: &[u8] = &token.tag.name;
        match tag_name {
            TAG_LINK => {
                icon_tags.push(token.tag);
            }
            FaviconToken::EndTag(tag) => {
                if *tag.name == TAG_HEAD {
                    break;
                }
            }
            TAG_BASE => {
                base_url = if let Some(href) = token.tag.attributes.get(ATTR_HREF) {
                    let href = std::str::from_utf8(href).unwrap_or_default();
                    debug!("Found base href: {href}");
                    match base_url.join(href) {
                        Ok(inner_url) => inner_url,
                        _ => continue,
                    }
                } else {
                    continue;
                };
            }
            TAG_HEAD if token.closing => {
                break;
            }
            _ => {
                continue;
            }
        }
    }
@@ -531,11 +534,11 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    let mut referer = String::new();

    if let Ok(content) = resp {
        // Extract the URL from the respose in case redirects occured (like @ gitlab.com)
        // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
        let url = content.url().clone();

        // Set the referer to be used on the final request, some sites check this.
        // Mostly used to prevent direct linking and other security resons.
        // Mostly used to prevent direct linking and other security reasons.
        referer = url.to_string();

        // Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from.
@@ -635,7 +638,7 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
    }
}

/// Returns a Tuple with the width and hight as a seperate value extracted from the sizes attribute
/// Returns a Tuple with the width and height as a separate value extracted from the sizes attribute
/// It will return 0 for both values if no match has been found.
///
/// # Arguments
@@ -682,7 +685,9 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {

    for icon in icon_result.iconlist.iter().take(5) {
        if icon.href.starts_with("data:image") {
            let Ok(datauri) = DataUrl::process(&icon.href) else {continue};
            let Ok(datauri) = DataUrl::process(&icon.href) else {
                continue;
            };
            // Check if we are able to decode the data uri
            let mut body = BytesMut::new();
            match datauri.decode::<_, ()>(|bytes| {
@@ -820,43 +825,64 @@ impl reqwest::cookie::CookieStore for Jar {
}

/// Custom FaviconEmitter for the html5gum parser.
/// The FaviconEmitter is using an almost 1:1 copy of the DefaultEmitter with some small changes.
/// The FaviconEmitter is using an optimized version of the DefaultEmitter.
/// This prevents emitting tags like comments, doctype and also strings between the tags.
/// But it will also only emit the tags we need and only if they have the correct attributes
/// Therefor parsing the HTML content is faster.
use std::collections::{BTreeSet, VecDeque};
use std::collections::BTreeMap;

#[derive(Debug)]
enum FaviconToken {
    StartTag(StartTag),
    EndTag(EndTag),
#[derive(Default)]
pub struct Tag {
    /// The tag's name, such as `"link"` or `"base"`.
    pub name: HtmlString,

    /// A mapping for any HTML attributes this start tag may have.
    ///
    /// Duplicate attributes are ignored after the first one as per WHATWG spec.
    pub attributes: BTreeMap<HtmlString, HtmlString>,
}

#[derive(Default, Debug)]
struct FaviconToken {
    tag: Tag,
    closing: bool,
}

#[derive(Default)]
struct FaviconEmitter {
    current_token: Option<FaviconToken>,
    last_start_tag: HtmlString,
    current_attribute: Option<(HtmlString, HtmlString)>,
    seen_attributes: BTreeSet<HtmlString>,
    emitted_tokens: VecDeque<FaviconToken>,
    emit_token: bool,
}

impl FaviconEmitter {
    fn emit_token(&mut self, token: FaviconToken) {
        self.emitted_tokens.push_front(token);
    }
    fn flush_current_attribute(&mut self, emit_current_tag: bool) {
        const ATTR_HREF: &[u8] = b"href";
        const ATTR_REL: &[u8] = b"rel";
        const TAG_LINK: &[u8] = b"link";
        const TAG_BASE: &[u8] = b"base";
        const TAG_HEAD: &[u8] = b"head";

    fn flush_current_attribute(&mut self) {
        if let Some((k, v)) = self.current_attribute.take() {
            match self.current_token {
                Some(FaviconToken::StartTag(ref mut tag)) => {
                    tag.attributes.entry(k).and_modify(|_| {}).or_insert(v);
                }
                Some(FaviconToken::EndTag(_)) => {
                    self.seen_attributes.insert(k);
                }
                _ => {
                    debug_assert!(false);
        if let Some(ref mut token) = self.current_token {
            let tag_name: &[u8] = &token.tag.name;

            if self.current_attribute.is_some() && (tag_name == TAG_BASE || tag_name == TAG_LINK) {
                let (k, v) = self.current_attribute.take().unwrap();
                token.tag.attributes.entry(k).and_modify(|_| {}).or_insert(v);
            }

            let tag_attr = &token.tag.attributes;
            match tag_name {
                TAG_HEAD if token.closing => self.emit_token = true,
                TAG_BASE if tag_attr.contains_key(ATTR_HREF) => self.emit_token = true,
                TAG_LINK if emit_current_tag && tag_attr.contains_key(ATTR_REL) && tag_attr.contains_key(ATTR_HREF) => {
                    let rel_value =
                        std::str::from_utf8(token.tag.attributes.get(ATTR_REL).unwrap()).unwrap_or_default();
                    if rel_value.contains("icon") && !rel_value.contains("mask-icon") {
                        self.emit_token = true
                    }
                }
                _ => (),
            }
        }
    }
@@ -871,87 +897,71 @@ impl Emitter for FaviconEmitter {
    }

    fn pop_token(&mut self) -> Option<Self::Token> {
        self.emitted_tokens.pop_back()
    }

    fn init_start_tag(&mut self) {
        self.current_token = Some(FaviconToken::StartTag(StartTag::default()));
    }

    fn init_end_tag(&mut self) {
        self.current_token = Some(FaviconToken::EndTag(EndTag::default()));
        self.seen_attributes.clear();
    }

    fn emit_current_tag(&mut self) -> Option<html5gum::State> {
        self.flush_current_attribute();
        let mut token = self.current_token.take().unwrap();
        let mut emit = false;
        match token {
            FaviconToken::EndTag(ref mut tag) => {
                // Always clean seen attributes
                self.seen_attributes.clear();
                self.set_last_start_tag(None);

                // Only trigger an emit for the </head> tag.
                // This is matched, and will break the for-loop.
                if *tag.name == b"head" {
                    emit = true;
                }
            }
            FaviconToken::StartTag(ref mut tag) => {
                // Only trriger an emit for <link> and <base> tags.
                // These are the only tags we want to parse.
                if *tag.name == b"link" || *tag.name == b"base" {
                    self.set_last_start_tag(Some(&tag.name));
                    emit = true;
                } else {
                    self.set_last_start_tag(None);
                }
            }
        }

        // Only emit the tags we want to parse.
        if emit {
            self.emit_token(token);
        if self.emit_token {
            self.emit_token = false;
            return self.current_token.take();
        }
        None
    }

    fn init_start_tag(&mut self) {
        self.current_token = Some(FaviconToken {
            tag: Tag::default(),
            closing: false,
        });
    }

    fn init_end_tag(&mut self) {
        self.current_token = Some(FaviconToken {
            tag: Tag::default(),
            closing: true,
        });
    }

    fn emit_current_tag(&mut self) -> Option<html5gum::State> {
        self.flush_current_attribute(true);
        self.last_start_tag.clear();
        if self.current_token.is_some() && !self.current_token.as_ref().unwrap().closing {
            self.last_start_tag.extend(&*self.current_token.as_ref().unwrap().tag.name);
        }
        html5gum::naive_next_state(&self.last_start_tag)
    }

    fn push_tag_name(&mut self, s: &[u8]) {
        match self.current_token {
            Some(
                FaviconToken::StartTag(StartTag {
                    ref mut name,
                    ..
                })
                | FaviconToken::EndTag(EndTag {
                    ref mut name,
                    ..
                }),
            ) => {
                name.extend(s);
            }
            _ => debug_assert!(false),
        if let Some(ref mut token) = self.current_token {
            token.tag.name.extend(s);
        }
    }

    fn init_attribute(&mut self) {
        self.flush_current_attribute();
        self.current_attribute = Some(Default::default());
        self.flush_current_attribute(false);
        self.current_attribute = match &self.current_token {
            Some(token) => {
                let tag_name: &[u8] = &token.tag.name;
                match tag_name {
                    b"link" | b"head" | b"base" => Some(Default::default()),
                    _ => None,
                }
            }
            _ => None,
        };
    }

    fn push_attribute_name(&mut self, s: &[u8]) {
        self.current_attribute.as_mut().unwrap().0.extend(s);
        if let Some(attr) = &mut self.current_attribute {
            attr.0.extend(s)
        }
    }

    fn push_attribute_value(&mut self, s: &[u8]) {
        self.current_attribute.as_mut().unwrap().1.extend(s);
        if let Some(attr) = &mut self.current_attribute {
            attr.1.extend(s)
        }
    }

    fn current_is_appropriate_end_tag_token(&mut self) -> bool {
        match self.current_token {
            Some(FaviconToken::EndTag(ref tag)) => !self.last_start_tag.is_empty() && self.last_start_tag == tag.name,
        match &self.current_token {
            Some(token) if token.closing => !self.last_start_tag.is_empty() && self.last_start_tag == token.tag.name,
            _ => false,
        }
    }
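For context, a hedged sketch of how an emitter like this is driven — based on the html5gum items imported at the top of this file (Tokenizer, Readable, InfallibleTokenizer); the function name is hypothetical:

// Hedged sketch: assumes html5gum's Tokenizer::new_with_emitter() and
// .infallible(), matching the types imported above. The emitter filters
// during tokenization, so only <link>, <base> and </head> tokens ever
// reach this loop.
fn list_head_tags(html: &str) {
    let dom = html5gum::Tokenizer::new_with_emitter(html, FaviconEmitter::default()).infallible();
    for token in dom {
        let name = String::from_utf8_lossy(&token.tag.name);
        println!("{}{}", if token.closing { "/" } else { "" }, name);
    }
}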
@@ -155,7 +155,27 @@ async fn _password_login(

    // Check password
    let password = data.password.as_ref().unwrap();
    if !user.check_valid_password(password) {
    if let Some(auth_request_uuid) = data.auth_request.clone() {
        if let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await {
            if !auth_request.check_access_code(password) {
                err!(
                    "Username or access code is incorrect. Try again",
                    format!("IP: {}. Username: {}.", ip.ip, username),
                    ErrorEvent {
                        event: EventType::UserFailedLogIn,
                    }
                )
            }
        } else {
            err!(
                "Auth request not found. Try again.",
                format!("IP: {}. Username: {}.", ip.ip, username),
                ErrorEvent {
                    event: EventType::UserFailedLogIn,
                }
            )
        }
    } else if !user.check_valid_password(password) {
        err!(
            "Username or password is incorrect. Try again",
            format!("IP: {}. Username: {}.", ip.ip, username),
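The new access-code path relies on check_access_code (defined in src/db/models/auth_request.rs, added later in this diff), which compares in constant time so a failed check cannot leak how many characters matched. A standalone sketch of that idea — the subtle crate is used here as an assumption; vaultwarden has its own crate::crypto::ct_eq helper:

use subtle::ConstantTimeEq;

// Constant-time equality: unlike `==`, the runtime does not depend on
// the position of the first mismatching byte.
fn ct_str_eq(a: &str, b: &str) -> bool {
    bool::from(a.as_bytes().ct_eq(b.as_bytes()))
}

fn main() {
    assert!(ct_str_eq("123456", "123456"));
    assert!(!ct_str_eq("123456", "123457"));
}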
@@ -260,6 +280,10 @@ async fn _password_login(
        "ResetMasterPassword": false,// TODO: Same as above
        "scope": scope,
        "unofficialServer": true,
        "UserDecryptionOptions": {
            "HasMasterPassword": !user.password_hash.is_empty(),
            "Object": "userDecryptionOptions"
        },
    });

    if let Some(token) = twofactor_token {
@@ -445,7 +469,7 @@ async fn twofactor_auth(
    TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;

    let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one
    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one

    let twofactor_code = match data.two_factor_token {
        Some(ref code) => code,
@@ -646,6 +670,8 @@ struct ConnectData {
    #[field(name = uncased("two_factor_remember"))]
    #[field(name = uncased("twofactorremember"))]
    two_factor_remember: Option<i32>,
    #[field(name = uncased("authrequest"))]
    auth_request: Option<String>,
}

fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
@@ -13,6 +13,7 @@ pub use crate::api::{
    admin::catchers as admin_catchers,
    admin::routes as admin_routes,
    core::catchers as core_catchers,
    core::purge_auth_requests,
    core::purge_sends,
    core::purge_trashed_ciphers,
    core::routes as core_routes,
@@ -22,7 +23,7 @@ pub use crate::api::{
    icons::routes as icons_routes,
    identity::routes as identity_routes,
    notifications::routes as notifications_routes,
    notifications::{start_notification_server, Notify, UpdateType},
    notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
    push::{
        push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
        unregister_push_device,
@@ -20,7 +20,7 @@ use tokio_tungstenite::{
};

use crate::{
    auth::ClientIp,
    auth::{ClientIp, WsAccessTokenHeader},
    db::{
        models::{Cipher, Folder, Send as DbSend, User},
        DbConn,
@@ -36,10 +36,19 @@ static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
    })
});

use super::{push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update};
pub static WS_ANONYMOUS_SUBSCRIPTIONS: Lazy<Arc<AnonymousWebSocketSubscriptions>> = Lazy::new(|| {
    Arc::new(AnonymousWebSocketSubscriptions {
        map: Arc::new(dashmap::DashMap::new()),
    })
});

use super::{
    push::push_auth_request, push::push_auth_response, push_cipher_update, push_folder_update, push_logout,
    push_send_update, push_user_update,
};

pub fn routes() -> Vec<Route> {
    routes![websockets_hub]
    routes![websockets_hub, anonymous_websockets_hub]
}

#[derive(FromForm, Debug)]
@@ -74,17 +83,50 @@ impl Drop for WSEntryMapGuard {
    }
}

struct WSAnonymousEntryMapGuard {
    subscriptions: Arc<AnonymousWebSocketSubscriptions>,
    token: String,
    addr: IpAddr,
}

impl WSAnonymousEntryMapGuard {
    fn new(subscriptions: Arc<AnonymousWebSocketSubscriptions>, token: String, addr: IpAddr) -> Self {
        Self {
            subscriptions,
            token,
            addr,
        }
    }
}

impl Drop for WSAnonymousEntryMapGuard {
    fn drop(&mut self) {
        info!("Closing WS connection from {}", self.addr);
        self.subscriptions.map.remove(&self.token);
    }
}

#[get("/hub?<data..>")]
fn websockets_hub<'r>(
    ws: rocket_ws::WebSocket,
    data: WsAccessToken,
    ip: ClientIp,
    header_token: WsAccessTokenHeader,
) -> Result<rocket_ws::Stream!['r], Error> {
    let addr = ip.ip;
    info!("Accepting Rocket WS connection from {addr}");

    let Some(token) = data.access_token else { err_code!("Invalid claim", 401) };
    let Ok(claims) = crate::auth::decode_login(&token) else { err_code!("Invalid token", 401) };
    let token = if let Some(token) = data.access_token {
        token
    } else if let Some(token) = header_token.access_token {
        token
    } else {
        err_code!("Invalid claim", 401)
    };

    let Ok(claims) = crate::auth::decode_login(&token) else {
        err_code!("Invalid token", 401)
    };

    let (mut rx, guard) = {
        let users = Arc::clone(&WS_USERS);
@@ -144,6 +186,72 @@ fn websockets_hub<'r>(
    })
}

#[get("/anonymous-hub?<token..>")]
fn anonymous_websockets_hub<'r>(
    ws: rocket_ws::WebSocket,
    token: String,
    ip: ClientIp,
) -> Result<rocket_ws::Stream!['r], Error> {
    let addr = ip.ip;
    info!("Accepting Anonymous Rocket WS connection from {addr}");

    let (mut rx, guard) = {
        let subscriptions = Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS);

        // Add a channel to send messages to this client to the map
        let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
        subscriptions.map.insert(token.clone(), tx);

        // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
        (rx, WSAnonymousEntryMapGuard::new(subscriptions, token, addr))
    };

    Ok({
        rocket_ws::Stream! { ws => {
            let mut ws = ws;
            let _guard = guard;
            let mut interval = tokio::time::interval(Duration::from_secs(15));
            loop {
                tokio::select! {
                    res = ws.next() => {
                        match res {
                            Some(Ok(message)) => {
                                match message {
                                    // Respond to any pings
                                    Message::Ping(ping) => yield Message::Pong(ping),
                                    Message::Pong(_) => {/* Ignored */},

                                    // We should receive an initial message with the protocol and version, and we will reply to it
                                    Message::Text(ref message) => {
                                        let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);

                                        if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
                                            yield Message::binary(INITIAL_RESPONSE);
                                            continue;
                                        }
                                    }
                                    // Just echo anything else the client sends
                                    _ => yield message,
                                }
                            }
                            _ => break,
                        }
                    }

                    res = rx.recv() => {
                        match res {
                            Some(res) => yield res,
                            None => break,
                        }
                    }

                    _ = interval.tick() => yield Message::Ping(create_ping())
                }
            }
        }}
    })
}

//
// Websockets server
//
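The INITIAL_MESSAGE / INITIAL_RESPONSE exchange above follows the SignalR handshake: the client sends a JSON handshake request terminated by a 0x1e record separator, and the server must answer before any MessagePack frames flow. A hedged sketch of the matching logic — the struct and constant values are assumptions based on the SignalR protocol, not copied from this diff:

use serde::Deserialize;

const RECORD_SEPARATOR: u8 = 0x1e;

// e.g. the client sends: {"protocol":"messagepack","version":1}\x1e
#[derive(Deserialize, Debug, PartialEq)]
struct HandshakeRequest {
    protocol: String,
    version: u32,
}

fn is_handshake(msg: &str) -> bool {
    let msg = msg.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(msg);
    serde_json::from_str::<HandshakeRequest>(msg).is_ok()
}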
@@ -352,6 +460,69 @@ impl WebSocketUsers {
            push_send_update(ut, send, acting_device_uuid, conn).await;
        }
    }

    pub async fn send_auth_request(
        &self,
        user_uuid: &String,
        auth_request_uuid: &String,
        acting_device_uuid: &String,
        conn: &mut DbConn,
    ) {
        let data = create_update(
            vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
            UpdateType::AuthRequest,
            Some(acting_device_uuid.to_string()),
        );
        self.send_update(user_uuid, &data).await;

        if CONFIG.push_enabled() {
            push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
        }
    }

    pub async fn send_auth_response(
        &self,
        user_uuid: &String,
        auth_response_uuid: &str,
        approving_device_uuid: String,
        conn: &mut DbConn,
    ) {
        let data = create_update(
            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
            UpdateType::AuthRequestResponse,
            approving_device_uuid.clone().into(),
        );
        self.send_update(auth_response_uuid, &data).await;

        if CONFIG.push_enabled() {
            push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)
                .await;
        }
    }
}

#[derive(Clone)]
pub struct AnonymousWebSocketSubscriptions {
    map: Arc<dashmap::DashMap<String, Sender<Message>>>,
}

impl AnonymousWebSocketSubscriptions {
    async fn send_update(&self, token: &str, data: &[u8]) {
        if let Some(sender) = self.map.get(token).map(|v| v.clone()) {
            if let Err(e) = sender.send(Message::binary(data)).await {
                error!("Error sending WS update {e}");
            }
        }
    }

    pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
        let data = create_anonymous_update(
            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
            UpdateType::AuthRequestResponse,
            user_uuid.to_string(),
        );
        self.send_update(auth_response_uuid, &data).await;
    }
}

/* Message Structure
@@ -387,6 +558,24 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui
    serialize(value)
}

fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec<u8> {
    use rmpv::Value as V;

    let value = V::Array(vec![
        1.into(),
        V::Map(vec![]),
        V::Nil,
        "AuthRequestResponseRecieved".into(),
        V::Array(vec![V::Map(vec![
            ("Type".into(), (ut as i32).into()),
            ("Payload".into(), payload.into()),
            ("UserId".into(), user_id.into()),
        ])]),
    ]);

    serialize(value)
}

fn create_ping() -> Vec<u8> {
    serialize(Value::Array(vec![6.into()]))
}
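These helpers build SignalR MessagePack frames by hand: element 0 is the message type (1 = Invocation, 6 = Ping), and the invocation target is the "AuthRequestResponseRecieved" method name (spelled the way the clients listen for it). A standalone sketch of encoding such a value with rmpv — the serialize helper above is assumed to do roughly this plus whatever length framing the transport needs:

use rmpv::Value;

// Encode a SignalR ping frame, [6], as raw MessagePack bytes.
fn encode_ping() -> Vec<u8> {
    let value = Value::Array(vec![6.into()]);
    let mut buf = Vec::new();
    rmpv::encode::write_value(&mut buf, &value).expect("writing to a Vec cannot fail");
    buf
}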
@@ -420,6 +609,7 @@ pub enum UpdateType {
}

pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;

pub fn start_notification_server() -> Arc<WebSocketUsers> {
    let users = Arc::clone(&WS_USERS);
@@ -116,7 +116,7 @@ pub async fn unregister_push_device(uuid: String) -> EmptyResult {
        .await
    {
        Ok(r) => r,
        Err(e) => err!(format!("An error occured during device unregistration: {e}")),
        Err(e) => err!(format!("An error occurred during device unregistration: {e}")),
    };
    Ok(())
}
@@ -252,6 +252,43 @@ async fn send_to_push_relay(notification_data: Value) {
        .send()
        .await
    {
        error!("An error occured while sending a send update to the push relay: {}", e);
        error!("An error occurred while sending a send update to the push relay: {}", e);
    };
}

pub async fn push_auth_request(user_uuid: String, auth_request_uuid: String, conn: &mut crate::db::DbConn) {
    if Device::check_user_has_push_device(user_uuid.as_str(), conn).await {
        tokio::task::spawn(send_to_push_relay(json!({
            "userId": user_uuid,
            "organizationId": (),
            "deviceId": null,
            "identifier": null,
            "type": UpdateType::AuthRequest as i32,
            "payload": {
                "id": auth_request_uuid,
                "userId": user_uuid,
            }
        })));
    }
}

pub async fn push_auth_response(
    user_uuid: String,
    auth_request_uuid: String,
    approving_device_uuid: String,
    conn: &mut crate::db::DbConn,
) {
    if Device::check_user_has_push_device(user_uuid.as_str(), conn).await {
        tokio::task::spawn(send_to_push_relay(json!({
            "userId": user_uuid,
            "organizationId": (),
            "deviceId": approving_device_uuid,
            "identifier": approving_device_uuid,
            "type": UpdateType::AuthRequestResponse as i32,
            "payload": {
                "id": auth_request_uuid,
                "userId": user_uuid,
            }
        })));
    }
}
@@ -12,13 +12,19 @@ use crate::{
};

pub fn routes() -> Vec<Route> {
    // If addding more routes here, consider also adding them to
    // If adding more routes here, consider also adding them to
    // crate::utils::LOGGED_ROUTES to make sure they appear in the log
    let mut routes = routes![attachments, alive, alive_head, static_files];
    if CONFIG.web_vault_enabled() {
        routes![web_index, web_index_head, app_id, web_files, attachments, alive, alive_head, static_files]
    } else {
        routes![attachments, alive, alive_head, static_files]
        routes.append(&mut routes![web_index, web_index_head, app_id, web_files]);
    }

    #[cfg(debug_assertions)]
    if CONFIG.reload_templates() {
        routes.append(&mut routes![_static_files_dev]);
    }

    routes
}

pub fn catchers() -> Vec<Catcher> {
@@ -94,7 +100,9 @@ async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {

#[get("/attachments/<uuid>/<file_id>?<token>")]
async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option<NamedFile> {
    let Ok(claims) = dbg!(decode_file_download(&token)) else { return None };
    let Ok(claims) = decode_file_download(&token) else {
        return None;
    };
    if claims.sub != *uuid || claims.file_id != *file_id {
        return None;
    }
@@ -116,7 +124,30 @@ fn alive_head(_conn: DbConn) -> EmptyResult {
    Ok(())
}

#[get("/vw_static/<filename>")]
// This endpoint/function is used during development and development only.
// It allows to easily develop the admin interface by always loading the files from disk instead from a slice of bytes
// This will only be active during a debug build and only when `RELOAD_TEMPLATES` is set to `true`
// NOTE: Do not forget to add any new files added to the `static_files` function below!
#[cfg(debug_assertions)]
#[get("/vw_static/<filename>", rank = 1)]
pub async fn _static_files_dev(filename: PathBuf) -> Option<NamedFile> {
    warn!("LOADING STATIC FILES FROM DISK");
    let file = filename.to_str().unwrap_or_default();
    let ext = filename.extension().unwrap_or_default();

    let path = if ext == "png" || ext == "svg" {
        tokio::fs::canonicalize(Path::new(file!()).parent().unwrap().join("../static/images/").join(file)).await
    } else {
        tokio::fs::canonicalize(Path::new(file!()).parent().unwrap().join("../static/scripts/").join(file)).await
    };

    if let Ok(path) = path {
        return NamedFile::open(path).await.ok();
    };
    None
}

#[get("/vw_static/<filename>", rank = 2)]
pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Error> {
    match filename {
        "404.png" => Ok((ContentType::PNG, include_bytes!("../static/images/404.png"))),
@@ -138,12 +169,12 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_diagnostics.js")))
        }
        "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
        "bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))),
        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
        "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.4.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.4.slim.js")))
        "jquery-3.7.0.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.0.slim.js")))
        }
        _ => err!(format!("Static file not found: {filename}")),
    }
23  src/auth.rs
@@ -825,3 +825,26 @@ impl<'r> FromRequest<'r> for ClientIp {
        })
    }
}

pub struct WsAccessTokenHeader {
    pub access_token: Option<String>,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for WsAccessTokenHeader {
    type Error = ();

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        // Get access_token
        let access_token = match headers.get_one("Authorization") {
            Some(a) => a.rsplit("Bearer ").next().map(String::from),
            None => None,
        };

        Outcome::Success(Self {
            access_token,
        })
    }
}
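The rsplit("Bearer ") call keeps whatever follows the last occurrence of the prefix, and returns the input unchanged when the prefix is absent. A quick standalone check of that behaviour:

fn bearer_token(header: &str) -> Option<String> {
    // rsplit iterates from the end, so next() yields everything after
    // the last "Bearer " occurrence.
    header.rsplit("Bearer ").next().map(String::from)
}

fn main() {
    assert_eq!(bearer_token("Bearer abc").as_deref(), Some("abc"));
    // No prefix: the whole header value comes back as-is.
    assert_eq!(bearer_token("abc").as_deref(), Some("abc"));
}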
@@ -126,7 +126,7 @@ macro_rules! make_config {

    if show_overrides && !overrides.is_empty() {
        // We can't use warn! here because logging isn't setup yet.
        println!("[WARNING] The following environment variables are being overriden by the config.json file.");
        println!("[WARNING] The following environment variables are being overridden by the config.json file.");
        println!("[WARNING] Please use the admin panel to make changes to them:");
        println!("[WARNING] {}\n", overrides.join(", "));
    }
@@ -164,7 +164,7 @@ macro_rules! make_config {
    )+)+

    pub fn prepare_json(&self) -> serde_json::Value {
        let (def, cfg, overriden) = {
        let (def, cfg, overridden) = {
            let inner = &self.inner.read().unwrap();
            (inner._env.build(), inner.config.clone(), inner._overrides.clone())
        };
@@ -211,7 +211,7 @@ macro_rules! make_config {
        element.insert("default".into(), serde_json::to_value(def.$name).unwrap());
        element.insert("type".into(), (_get_form_type(stringify!($ty))).into());
        element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into());
        element.insert("overridden".into(), (overriden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
        element.insert("overridden".into(), (overridden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
        element
    }),
)+
@@ -409,6 +409,10 @@ make_config! {
        /// Event cleanup schedule |> Cron schedule of the job that cleans old events from the event table.
        /// Defaults to daily. Set blank to disable this job.
        event_cleanup_schedule: String, false, def, "0 10 0 * * *".to_string();
        /// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the auth request.
        /// Defaults to every minute. Set blank to disable this job.
        auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();

    },

    /// General settings
@@ -476,6 +480,8 @@ make_config! {
        invitation_expiration_hours: u32, false, def, 120;
        /// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
        emergency_access_allowed: bool, true, def, true;
        /// Allow email change |> Controls whether users can change their email. This setting applies globally to all users.
        email_change_allowed: bool, true, def, true;
        /// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
        /// The default for new users. If changed, it will be updated during login for existing users.
        password_iterations: i32, true, def, 600_000;
@@ -492,7 +498,7 @@ make_config! {
        /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
        invitation_org_name: String, true, def, "Vaultwarden".to_string();

        /// Events days retain |> Number of days to retain events stored in the database. If unset, events are kept indefently.
        /// Events days retain |> Number of days to retain events stored in the database. If unset, events are kept indefinitely.
        events_days_retain: i64, false, option;
    },
@@ -520,7 +526,7 @@ make_config! {
        /// has been decided on, consider using permanent redirects for cacheability. The legacy codes
        /// are currently better supported by the Bitwarden clients.
        icon_redirect_code: u32, true, def, 302;
        /// Positive icon cache expiry |> Number of seconds to consider that an already cached icon is fresh. After this period, the icon will be redownloaded
        /// Positive icon cache expiry |> Number of seconds to consider that an already cached icon is fresh. After this period, the icon will be refreshed
        icon_cache_ttl: u64, true, def, 2_592_000;
        /// Negative icon cache expiry |> Number of seconds before trying to download an icon that failed again.
        icon_cache_negttl: u64, true, def, 259_200;
@@ -530,7 +536,7 @@ make_config! {
        /// Useful to hide other servers in the local network. Check the WIKI for more details
        icon_blacklist_regex: String, true, option;
        /// Icon blacklist non global IPs |> Any IP which is not defined as a global IP will be blacklisted.
        /// Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
        /// Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
        icon_blacklist_non_global_ips: bool, true, def, true;

        /// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
@@ -566,7 +572,7 @@ make_config! {
        /// Max database connection retries |> Number of times to retry the database connection during startup, with 1 second between each retry, set to 0 to retry indefinitely
        db_connection_retries: u32, false, def, 15;

        /// Timeout when aquiring database connection
        /// Timeout when acquiring database connection
        database_timeout: u64, false, def, 30;

        /// Database connection pool size
@@ -893,6 +899,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
        err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression")
    }

    if !cfg.auth_request_purge_schedule.is_empty() && cfg.auth_request_purge_schedule.parse::<Schedule>().is_err() {
        err!("`AUTH_REQUEST_PURGE_SCHEDULE` is not a valid cron expression")
    }

    if !cfg.disable_admin_token {
        match cfg.admin_token.as_ref() {
            Some(t) if t.starts_with("$argon2") => {
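The schedule strings are validated by parsing them with the cron crate, which uses a 6-field format with a leading seconds column. A standalone check of the new default:

use cron::Schedule;
use std::str::FromStr;

fn main() {
    // "30 * * * * *" = second 30 of every minute (sec min hour dom mon dow).
    let schedule = Schedule::from_str("30 * * * * *").expect("valid cron expression");
    for next in schedule.upcoming(chrono::Utc).take(3) {
        println!("next auth request purge: {next}");
    }
}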
@@ -479,21 +479,9 @@ mod postgresql_migrations {
    pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/postgresql");

    pub fn run_migrations() -> Result<(), super::Error> {
        use diesel::{Connection, RunQueryDsl};
        use diesel::Connection;
        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
        let mut connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
        // Disable Foreign Key Checks during migration

        // FIXME: Per https://www.postgresql.org/docs/12/sql-set-constraints.html,
        // "SET CONSTRAINTS sets the behavior of constraint checking within the
        // current transaction", so this setting probably won't take effect for
        // any of the migrations since it's being run outside of a transaction.
        // Migrations that need to disable foreign key checks should run this
        // from within the migration script itself.
        diesel::sql_query("SET CONSTRAINTS ALL DEFERRED")
            .execute(&mut connection)
            .expect("Failed to disable Foreign Key Checks during migrations");

        connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
        Ok(())
    }
148  src/db/models/auth_request.rs (new file)
@@ -0,0 +1,148 @@
use crate::crypto::ct_eq;
use chrono::{NaiveDateTime, Utc};

db_object! {
    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
    #[diesel(table_name = auth_requests)]
    #[diesel(treat_none_as_null = true)]
    #[diesel(primary_key(uuid))]
    pub struct AuthRequest {
        pub uuid: String,
        pub user_uuid: String,
        pub organization_uuid: Option<String>,

        pub request_device_identifier: String,
        pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs

        pub request_ip: String,
        pub response_device_id: Option<String>,

        pub access_code: String,
        pub public_key: String,

        pub enc_key: Option<String>,

        pub master_password_hash: Option<String>,
        pub approved: Option<bool>,
        pub creation_date: NaiveDateTime,
        pub response_date: Option<NaiveDateTime>,

        pub authentication_date: Option<NaiveDateTime>,
    }
}

impl AuthRequest {
    pub fn new(
        user_uuid: String,
        request_device_identifier: String,
        device_type: i32,
        request_ip: String,
        access_code: String,
        public_key: String,
    ) -> Self {
        let now = Utc::now().naive_utc();

        Self {
            uuid: crate::util::get_uuid(),
            user_uuid,
            organization_uuid: None,

            request_device_identifier,
            device_type,
            request_ip,
            response_device_id: None,
            access_code,
            public_key,
            enc_key: None,
            master_password_hash: None,
            approved: None,
            creation_date: now,
            response_date: None,
            authentication_date: None,
        }
    }
}

use crate::db::DbConn;

use crate::api::EmptyResult;
use crate::error::MapResult;

impl AuthRequest {
    pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(auth_requests::table)
                    .values(AuthRequestDb::to_db(self))
                    .execute(conn)
                {
                    Ok(_) => Ok(()),
                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
                        diesel::update(auth_requests::table)
                            .filter(auth_requests::uuid.eq(&self.uuid))
                            .set(AuthRequestDb::to_db(self))
                            .execute(conn)
                            .map_res("Error auth_request")
                    }
                    Err(e) => Err(e.into()),
                }.map_res("Error auth_request")
            }
            postgresql {
                let value = AuthRequestDb::to_db(self);
                diesel::insert_into(auth_requests::table)
                    .values(&value)
                    .on_conflict(auth_requests::uuid)
                    .do_update()
                    .set(&value)
                    .execute(conn)
                    .map_res("Error saving auth_request")
            }
        }
    }

    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! {conn: {
            auth_requests::table
                .filter(auth_requests::uuid.eq(uuid))
                .first::<AuthRequestDb>(conn)
                .ok()
                .from_db()
        }}
    }

    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            auth_requests::table
                .filter(auth_requests::user_uuid.eq(user_uuid))
                .load::<AuthRequestDb>(conn).expect("Error loading auth_requests").from_db()
        }}
    }

    pub async fn find_created_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            auth_requests::table
                .filter(auth_requests::creation_date.lt(dt))
                .load::<AuthRequestDb>(conn).expect("Error loading auth_requests").from_db()
        }}
    }

    pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(auth_requests::table.filter(auth_requests::uuid.eq(&self.uuid)))
                .execute(conn)
                .map_res("Error deleting auth request")
        }}
    }

    pub fn check_access_code(&self, access_code: &str) -> bool {
        ct_eq(&self.access_code, access_code)
    }

    pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
        let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); //after 5 minutes, clients reject the request
        for auth_request in Self::find_created_before(&expiry_time, conn).await {
            auth_request.delete(conn).await.ok();
        }
    }
}
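purge_expired_auth_requests derives a cutoff five minutes in the past and deletes everything created before it. The cutoff arithmetic in isolation:

use chrono::{Duration, Utc};

fn main() {
    // Anything created before this instant is considered expired;
    // clients stop showing pending auth requests after ~5 minutes.
    let expiry_time = Utc::now().naive_utc() - Duration::minutes(5);
    println!("purging auth requests created before {expiry_time}");
}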
@@ -23,12 +23,13 @@ db_object! {
        pub user_uuid: Option<String>,
        pub organization_uuid: Option<String>,

        pub key: Option<String>,

        /*
        Login = 1,
        SecureNote = 2,
        Card = 3,
        Identity = 4,
        Fido2key = 5
        Identity = 4
        */
        pub atype: i32,
        pub name: String,
@@ -62,6 +63,8 @@ impl Cipher {
            user_uuid: None,
            organization_uuid: None,

            key: None,

            atype,
            name,

@@ -203,6 +206,7 @@ impl Cipher {
            "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
            "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
            "OrganizationId": self.organization_uuid,
            "Key": self.key,
            "Attachments": attachments_json,
            // We have UseTotp set to true by default within the Organization model.
            // This variable together with UsersGetPremium is used to show or hide the TOTP counter.
@@ -224,7 +228,6 @@ impl Cipher {
            "SecureNote": null,
            "Card": null,
            "Identity": null,
            "Fido2Key": null,
        });

        // These values are only needed for user/default syncs
@@ -253,7 +256,6 @@ impl Cipher {
            2 => "SecureNote",
            3 => "Card",
            4 => "Identity",
            5 => "Fido2Key",
            _ => panic!("Wrong type"),
        };
@@ -1,6 +1,7 @@
use chrono::{NaiveDateTime, Utc};

use crate::{crypto, CONFIG};
use core::fmt;

db_object! {
    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -225,3 +226,90 @@ impl Device {
        }}
    }
}

pub enum DeviceType {
    Android = 0,
    Ios = 1,
    ChromeExtension = 2,
    FirefoxExtension = 3,
    OperaExtension = 4,
    EdgeExtension = 5,
    WindowsDesktop = 6,
    MacOsDesktop = 7,
    LinuxDesktop = 8,
    ChromeBrowser = 9,
    FirefoxBrowser = 10,
    OperaBrowser = 11,
    EdgeBrowser = 12,
    IEBrowser = 13,
    UnknownBrowser = 14,
    AndroidAmazon = 15,
    Uwp = 16,
    SafariBrowser = 17,
    VivaldiBrowser = 18,
    VivaldiExtension = 19,
    SafariExtension = 20,
    Sdk = 21,
    Server = 22,
}

impl fmt::Display for DeviceType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DeviceType::Android => write!(f, "Android"),
            DeviceType::Ios => write!(f, "iOS"),
            DeviceType::ChromeExtension => write!(f, "Chrome Extension"),
            DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
            DeviceType::OperaExtension => write!(f, "Opera Extension"),
            DeviceType::EdgeExtension => write!(f, "Edge Extension"),
            DeviceType::WindowsDesktop => write!(f, "Windows Desktop"),
            DeviceType::MacOsDesktop => write!(f, "MacOS Desktop"),
            DeviceType::LinuxDesktop => write!(f, "Linux Desktop"),
            DeviceType::ChromeBrowser => write!(f, "Chrome Browser"),
            DeviceType::FirefoxBrowser => write!(f, "Firefox Browser"),
            DeviceType::OperaBrowser => write!(f, "Opera Browser"),
            DeviceType::EdgeBrowser => write!(f, "Edge Browser"),
            DeviceType::IEBrowser => write!(f, "Internet Explorer"),
            DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
            DeviceType::AndroidAmazon => write!(f, "Android Amazon"),
            DeviceType::Uwp => write!(f, "UWP"),
            DeviceType::SafariBrowser => write!(f, "Safari Browser"),
            DeviceType::VivaldiBrowser => write!(f, "Vivaldi Browser"),
            DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
            DeviceType::SafariExtension => write!(f, "Safari Extension"),
            DeviceType::Sdk => write!(f, "SDK"),
            DeviceType::Server => write!(f, "Server"),
        }
    }
}

impl DeviceType {
    pub fn from_i32(value: i32) -> DeviceType {
        match value {
            0 => DeviceType::Android,
            1 => DeviceType::Ios,
            2 => DeviceType::ChromeExtension,
            3 => DeviceType::FirefoxExtension,
            4 => DeviceType::OperaExtension,
            5 => DeviceType::EdgeExtension,
            6 => DeviceType::WindowsDesktop,
            7 => DeviceType::MacOsDesktop,
            8 => DeviceType::LinuxDesktop,
            9 => DeviceType::ChromeBrowser,
            10 => DeviceType::FirefoxBrowser,
            11 => DeviceType::OperaBrowser,
            12 => DeviceType::EdgeBrowser,
            13 => DeviceType::IEBrowser,
            14 => DeviceType::UnknownBrowser,
            15 => DeviceType::AndroidAmazon,
            16 => DeviceType::Uwp,
            17 => DeviceType::SafariBrowser,
            18 => DeviceType::VivaldiBrowser,
            19 => DeviceType::VivaldiExtension,
            20 => DeviceType::SafariExtension,
            21 => DeviceType::Sdk,
            22 => DeviceType::Server,
            _ => DeviceType::UnknownBrowser,
        }
    }
}
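from_i32 is total (out-of-range values collapse to UnknownBrowser) and Display supplies the human-readable name, so any client-supplied integer can be rendered safely:

// Example use of the conversions above: any raw i32 from a client
// becomes a printable device name.
fn device_name(raw: i32) -> String {
    DeviceType::from_i32(raw).to_string()
}

// device_name(9)  == "Chrome Browser"
// device_name(99) == "Unknown Browser"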
@@ -94,18 +94,11 @@ impl Group {
    }

    pub fn set_external_id(&mut self, external_id: Option<String>) {
        //Check if external id is empty. We don't want to have
        //empty strings in the database
        match external_id {
            Some(external_id) => {
                if external_id.is_empty() {
                    self.external_id = None;
                } else {
                    self.external_id = Some(external_id)
                }
            }
            None => self.external_id = None,
        }
        // Check if external_id is empty. We do not want to have empty strings in the database
        self.external_id = match external_id {
            Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
            _ => None,
        };
    }
}
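The rewrite collapses the nested match into a single match with a guard. The same normalization pattern in isolation:

// Whitespace-only or empty external ids are stored as None rather than
// as empty strings.
fn normalize(external_id: Option<String>) -> Option<String> {
    match external_id {
        Some(id) if !id.trim().is_empty() => Some(id),
        _ => None,
    }
}

// normalize(Some("abc".into())) == Some("abc".to_string())
// normalize(Some("   ".into())) == None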
@@ -1,4 +1,5 @@
mod attachment;
mod auth_request;
mod cipher;
mod collection;
mod device;
@@ -15,9 +16,10 @@ mod two_factor_incomplete;
mod user;

pub use self::attachment::Attachment;
pub use self::auth_request::AuthRequest;
pub use self::cipher::Cipher;
pub use self::collection::{Collection, CollectionCipher, CollectionUser};
pub use self::device::Device;
pub use self::device::{Device, DeviceType};
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
pub use self::event::{Event, EventType};
pub use self::favorite::Favorite;
@@ -31,6 +31,7 @@ db_object! {
        pub status: i32,
        pub atype: i32,
        pub reset_password_key: Option<String>,
        pub external_id: Option<String>,
    }

    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -208,19 +209,37 @@ impl UserOrganization {
            status: UserOrgStatus::Accepted as i32,
            atype: UserOrgType::User as i32,
            reset_password_key: None,
            external_id: None,
        }
    }

    pub fn restore(&mut self) {
    pub fn restore(&mut self) -> bool {
        if self.status < UserOrgStatus::Accepted as i32 {
            self.status += ACTIVATE_REVOKE_DIFF;
            return true;
        }
        false
    }

    pub fn revoke(&mut self) {
    pub fn revoke(&mut self) -> bool {
        if self.status > UserOrgStatus::Revoked as i32 {
            self.status -= ACTIVATE_REVOKE_DIFF;
            return true;
        }
        false
    }

    pub fn set_external_id(&mut self, external_id: Option<String>) -> bool {
        //Check if external id is empty. We don't want to have
        //empty strings in the database
        if self.external_id != external_id {
            self.external_id = match external_id {
                Some(external_id) if !external_id.is_empty() => Some(external_id),
                _ => None,
            };
            return true;
        }
        false
    }
}

@@ -396,7 +415,7 @@ impl UserOrganization {
        let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();

        // Because BitWarden want the status to be -1 for revoked users we need to catch that here.
        // We subtract/add a number so we can restore/activate the user to it's previouse state again.
        // We subtract/add a number so we can restore/activate the user to it's previous state again.
        let status = if self.status < UserOrgStatus::Revoked as i32 {
            UserOrgStatus::Revoked as i32
        } else {
@@ -434,6 +453,7 @@ impl UserOrganization {
            "UserId": self.user_uuid,
            "Name": user.name,
            "Email": user.email,
            "ExternalId": self.external_id,
            "Groups": groups,
            "Collections": collections,

@@ -441,7 +461,7 @@ impl UserOrganization {
            "Type": self.atype,
            "AccessAll": self.access_all,
            "TwoFactorEnabled": twofactor_enabled,
            "ResetPasswordEnrolled":self.reset_password_key.is_some(),
            "ResetPasswordEnrolled": self.reset_password_key.is_some(),

            "Object": "organizationUserUserDetails",
        })
@@ -474,7 +494,7 @@ impl UserOrganization {
        };

        // Because BitWarden want the status to be -1 for revoked users we need to catch that here.
        // We subtract/add a number so we can restore/activate the user to it's previouse state again.
        // We subtract/add a number so we can restore/activate the user to it's previous state again.
        let status = if self.status < UserOrgStatus::Revoked as i32 {
            UserOrgStatus::Revoked as i32
        } else {
@@ -777,6 +797,17 @@ impl UserOrganization {
            .load::<UserOrganizationDb>(conn).expect("Error loading user organizations").from_db()
        }}
    }

    pub async fn find_by_external_id_and_org(ext_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! {conn: {
            users_organizations::table
                .filter(
                    users_organizations::external_id.eq(ext_id)
                    .and(users_organizations::org_uuid.eq(org_uuid))
                )
                .first::<UserOrganizationDb>(conn).ok().from_db()
        }}
    }
}

impl OrganizationApiKey {
@@ -804,7 +835,7 @@ impl OrganizationApiKey {
        let value = OrganizationApiKeyDb::to_db(self);
        diesel::insert_into(organization_api_key::table)
            .values(&value)
            .on_conflict(organization_api_key::uuid)
            .on_conflict((organization_api_key::uuid, organization_api_key::org_uuid))
            .do_update()
            .set(&value)
            .execute(conn)
@@ -51,7 +51,7 @@ db_object! {

        pub avatar_color: Option<String>,

        pub external_id: Option<String>,
        pub external_id: Option<String>, // Todo: Needs to be removed in the future, this is not used anymore.
    }

    #[derive(Identifiable, Queryable, Insertable)]
@@ -129,7 +129,7 @@ impl User {

            avatar_color: None,

            external_id: None,
            external_id: None, // Todo: Needs to be removed in the future, this is not used anymore.
        }
    }

@@ -154,18 +154,6 @@ impl User {
        matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
    }

    pub fn set_external_id(&mut self, external_id: Option<String>) {
        //Check if external id is empty. We don't want to have
        //empty strings in the database
        let mut ext_id: Option<String> = None;
        if let Some(external_id) = external_id {
            if !external_id.is_empty() {
                ext_id = Some(external_id);
            }
        }
        self.external_id = ext_id;
    }

    /// Set the password hash generated
    /// And resets the security_stamp. Based upon the allow_next_route the security_stamp will be different.
    ///
@@ -392,12 +380,6 @@ impl User {
        }}
    }

    pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! {conn: {
            users::table.filter(users::external_id.eq(id)).first::<UserDb>(conn).ok().from_db()
        }}
    }

    pub async fn get_all(conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            users::table.load::<UserDb>(conn).expect("Error loading users").from_db()
@@ -15,6 +15,7 @@ table! {
        updated_at -> Datetime,
        user_uuid -> Nullable<Text>,
        organization_uuid -> Nullable<Text>,
+       key -> Nullable<Text>,
        atype -> Integer,
        name -> Text,
        notes -> Nullable<Text>,
@@ -228,6 +229,7 @@ table! {
        status -> Integer,
        atype -> Integer,
        reset_password_key -> Nullable<Text>,
+       external_id -> Nullable<Text>,
    }
}

@@ -286,6 +288,26 @@ table! {
    }
}

+table! {
+    auth_requests (uuid) {
+        uuid -> Text,
+        user_uuid -> Text,
+        organization_uuid -> Nullable<Text>,
+        request_device_identifier -> Text,
+        device_type -> Integer,
+        request_ip -> Text,
+        response_device_id -> Nullable<Text>,
+        access_code -> Text,
+        public_key -> Text,
+        enc_key -> Nullable<Text>,
+        master_password_hash -> Nullable<Text>,
+        approved -> Nullable<Bool>,
+        creation_date -> Timestamp,
+        response_date -> Nullable<Timestamp>,
+        authentication_date -> Nullable<Timestamp>,
+    }
+}
+
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@@ -304,6 +326,7 @@ joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));
+joinable!(users_organizations -> ciphers (org_uuid));
joinable!(organization_api_key -> organizations (org_uuid));
joinable!(emergency_access -> users (grantor_uuid));
joinable!(groups -> organizations (organizations_uuid));
@@ -312,6 +335,7 @@ joinable!(groups_users -> groups (groups_uuid));
joinable!(collections_groups -> collections (collections_uuid));
joinable!(collections_groups -> groups (groups_uuid));
joinable!(event -> users_organizations (uuid));
+joinable!(auth_requests -> users (user_uuid));

allow_tables_to_appear_in_same_query!(
    attachments,
@@ -335,4 +359,5 @@ allow_tables_to_appear_in_same_query!(
    groups_users,
    collections_groups,
    event,
+   auth_requests,
);

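Judging by updated_at -> Datetime, the schema copy above appears to be the MySQL variant; the same diff repeats below for the other backends. The new auth_requests table can be queried like any other diesel table. A sketch with a trimmed-down declaration (the real one has all fifteen columns shown above) and an assumed helper name:

use diesel::prelude::*;

table! {
    auth_requests (uuid) {
        uuid -> Text,
        user_uuid -> Text,
        approved -> Nullable<Bool>,
        creation_date -> Timestamp,
    }
}

// Count a user's auth requests that have not been answered yet.
fn pending_requests(conn: &mut MysqlConnection, user: &str) -> QueryResult<i64> {
    auth_requests::table
        .filter(auth_requests::user_uuid.eq(user))
        .filter(auth_requests::approved.is_null())
        .count()
        .get_result(conn)
}
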
@@ -15,6 +15,7 @@ table! {
        updated_at -> Timestamp,
        user_uuid -> Nullable<Text>,
        organization_uuid -> Nullable<Text>,
+       key -> Nullable<Text>,
        atype -> Integer,
        name -> Text,
        notes -> Nullable<Text>,
@@ -228,6 +229,7 @@ table! {
        status -> Integer,
        atype -> Integer,
        reset_password_key -> Nullable<Text>,
+       external_id -> Nullable<Text>,
    }
}

@@ -286,6 +288,26 @@ table! {
    }
}

+table! {
+    auth_requests (uuid) {
+        uuid -> Text,
+        user_uuid -> Text,
+        organization_uuid -> Nullable<Text>,
+        request_device_identifier -> Text,
+        device_type -> Integer,
+        request_ip -> Text,
+        response_device_id -> Nullable<Text>,
+        access_code -> Text,
+        public_key -> Text,
+        enc_key -> Nullable<Text>,
+        master_password_hash -> Nullable<Text>,
+        approved -> Nullable<Bool>,
+        creation_date -> Timestamp,
+        response_date -> Nullable<Timestamp>,
+        authentication_date -> Nullable<Timestamp>,
+    }
+}
+
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@@ -304,6 +326,7 @@ joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));
+joinable!(users_organizations -> ciphers (org_uuid));
joinable!(organization_api_key -> organizations (org_uuid));
joinable!(emergency_access -> users (grantor_uuid));
joinable!(groups -> organizations (organizations_uuid));
@@ -312,6 +335,7 @@ joinable!(groups_users -> groups (groups_uuid));
joinable!(collections_groups -> collections (collections_uuid));
joinable!(collections_groups -> groups (groups_uuid));
joinable!(event -> users_organizations (uuid));
+joinable!(auth_requests -> users (user_uuid));

allow_tables_to_appear_in_same_query!(
    attachments,
@@ -335,4 +359,5 @@ allow_tables_to_appear_in_same_query!(
    groups_users,
    collections_groups,
    event,
+   auth_requests,
);

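The joinable! and allow_tables_to_appear_in_same_query! additions are what let auth_requests participate in joins at all: joinable! records the foreign-key column, after which inner_join needs no explicit ON clause. A sketch under the same assumptions (trimmed declarations, hypothetical function name):

use diesel::prelude::*;

table! {
    users (uuid) {
        uuid -> Text,
        email -> Text,
    }
}

table! {
    auth_requests (uuid) {
        uuid -> Text,
        user_uuid -> Text,
    }
}

joinable!(auth_requests -> users (user_uuid));
allow_tables_to_appear_in_same_query!(users, auth_requests);

// Pair each request with the requesting user's email; the join condition
// (auth_requests.user_uuid = users.uuid) comes from joinable! above.
fn requests_with_emails(conn: &mut PgConnection) -> QueryResult<Vec<(String, String)>> {
    auth_requests::table
        .inner_join(users::table)
        .select((auth_requests::uuid, users::email))
        .load(conn)
}
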
@@ -15,6 +15,7 @@ table! {
        updated_at -> Timestamp,
        user_uuid -> Nullable<Text>,
        organization_uuid -> Nullable<Text>,
+       key -> Nullable<Text>,
        atype -> Integer,
        name -> Text,
        notes -> Nullable<Text>,
@@ -228,6 +229,7 @@ table! {
        status -> Integer,
        atype -> Integer,
        reset_password_key -> Nullable<Text>,
+       external_id -> Nullable<Text>,
    }
}

@@ -286,6 +288,26 @@ table! {
    }
}

+table! {
+    auth_requests (uuid) {
+        uuid -> Text,
+        user_uuid -> Text,
+        organization_uuid -> Nullable<Text>,
+        request_device_identifier -> Text,
+        device_type -> Integer,
+        request_ip -> Text,
+        response_device_id -> Nullable<Text>,
+        access_code -> Text,
+        public_key -> Text,
+        enc_key -> Nullable<Text>,
+        master_password_hash -> Nullable<Text>,
+        approved -> Nullable<Bool>,
+        creation_date -> Timestamp,
+        response_date -> Nullable<Timestamp>,
+        authentication_date -> Nullable<Timestamp>,
+    }
+}
+
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@@ -313,6 +335,7 @@ joinable!(groups_users -> groups (groups_uuid));
joinable!(collections_groups -> collections (collections_uuid));
joinable!(collections_groups -> groups (groups_uuid));
joinable!(event -> users_organizations (uuid));
+joinable!(auth_requests -> users (user_uuid));

allow_tables_to_appear_in_same_query!(
    attachments,
@@ -336,4 +359,5 @@ allow_tables_to_appear_in_same_query!(
    groups_users,
    collections_groups,
    event,
+   auth_requests,
);

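The only material difference between the three schema copies is the backend-specific column type: the first uses MySQL's Datetime for updated_at while the other two use Timestamp. On the Rust side both should deserialize into the same type, so model structs stay backend-agnostic; a sketch assuming diesel's chrono feature:

use chrono::NaiveDateTime;
use diesel::prelude::*;

// Datetime (MySQL) and Timestamp (PostgreSQL/SQLite) both map to
// chrono::NaiveDateTime, matching the nullable date columns above.
#[derive(Queryable)]
struct AuthRequestDates {
    creation_date: NaiveDateTime,
    response_date: Option<NaiveDateTime>,
    authentication_date: Option<NaiveDateTime>,
}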